2024-11-08 00:33:26,643 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-08 00:33:26,655 main DEBUG Took 0.010149 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-08 00:33:26,655 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-08 00:33:26,656 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-08 00:33:26,657 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-08 00:33:26,658 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,664 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-08 00:33:26,675 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,677 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,678 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,679 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,680 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,680 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,680 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,681 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,682 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,682 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,682 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-08 00:33:26,683 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,683 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,684 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,684 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,684 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,685 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,685 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,685 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,686 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,686 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 00:33:26,686 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,687 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-08 00:33:26,688 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 00:33:26,690 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-08 00:33:26,691 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-08 00:33:26,692 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-08 00:33:26,693 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-08 00:33:26,693 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-08 00:33:26,702 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-08 00:33:26,704 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-08 00:33:26,706 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-08 00:33:26,707 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-08 00:33:26,707 main DEBUG createAppenders(={Console}) 2024-11-08 00:33:26,708 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-08 00:33:26,708 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-08 00:33:26,708 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-08 00:33:26,709 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-08 00:33:26,709 main DEBUG OutputStream closed 2024-11-08 00:33:26,710 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-08 00:33:26,710 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-08 00:33:26,710 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-08 00:33:26,784 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-08 00:33:26,786 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-08 00:33:26,787 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-08 00:33:26,789 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-08 00:33:26,789 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-08 00:33:26,790 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-08 00:33:26,790 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-08 00:33:26,790 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-08 00:33:26,791 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-08 00:33:26,791 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-08 00:33:26,791 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-08 00:33:26,792 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-08 00:33:26,792 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-08 00:33:26,792 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-08 00:33:26,793 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-08 00:33:26,793 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-08 00:33:26,793 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-08 00:33:26,794 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-08 00:33:26,796 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-08 00:33:26,797 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-08 00:33:26,797 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-08 00:33:26,798 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-08T00:33:27,009 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1 2024-11-08 00:33:27,011 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-08 00:33:27,012 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-08T00:33:27,020 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-08T00:33:27,055 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=75, ProcessCount=11, AvailableMemoryMB=7548 2024-11-08T00:33:27,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T00:33:27,081 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e, deleteOnExit=true 2024-11-08T00:33:27,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T00:33:27,082 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/test.cache.data in system properties and HBase conf 2024-11-08T00:33:27,083 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T00:33:27,084 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.log.dir in system properties and HBase conf 2024-11-08T00:33:27,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T00:33:27,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T00:33:27,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T00:33:27,185 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-08T00:33:27,275 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T00:33:27,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:33:27,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:33:27,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T00:33:27,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:33:27,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T00:33:27,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T00:33:27,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:33:27,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:33:27,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T00:33:27,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/nfs.dump.dir in system properties and HBase conf 2024-11-08T00:33:27,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/java.io.tmpdir in system properties and HBase conf 2024-11-08T00:33:27,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:33:27,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T00:33:27,285 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T00:33:27,746 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:33:28,343 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-08T00:33:28,419 INFO [Time-limited test {}] log.Log(170): Logging initialized @2515ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-08T00:33:28,490 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:33:28,545 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:33:28,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:33:28,563 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:33:28,564 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:33:28,575 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:33:28,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:33:28,578 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:33:28,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/java.io.tmpdir/jetty-localhost-44555-hadoop-hdfs-3_4_1-tests_jar-_-any-9258529096714281881/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:33:28,746 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:44555} 2024-11-08T00:33:28,746 INFO [Time-limited test {}] server.Server(415): Started @2843ms 2024-11-08T00:33:28,773 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:33:29,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:33:29,325 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:33:29,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:33:29,327 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:33:29,327 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:33:29,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:33:29,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:33:29,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/java.io.tmpdir/jetty-localhost-43425-hadoop-hdfs-3_4_1-tests_jar-_-any-5001181853332178745/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:33:29,427 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:43425} 2024-11-08T00:33:29,427 INFO [Time-limited test {}] server.Server(415): Started @3524ms 2024-11-08T00:33:29,476 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:33:29,584 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:33:29,591 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:33:29,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:33:29,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:33:29,593 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:33:29,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:33:29,595 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:33:29,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/java.io.tmpdir/jetty-localhost-40565-hadoop-hdfs-3_4_1-tests_jar-_-any-15240131835583887421/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:33:29,696 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:40565} 2024-11-08T00:33:29,696 INFO [Time-limited test {}] server.Server(415): Started @3793ms 2024-11-08T00:33:29,698 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-08T00:33:30,605 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/data/data4/current/BP-290164614-172.17.0.3-1731026007826/current, will proceed with Du for space computation calculation, 2024-11-08T00:33:30,605 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/data/data1/current/BP-290164614-172.17.0.3-1731026007826/current, will proceed with Du for space computation calculation, 2024-11-08T00:33:30,605 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/data/data2/current/BP-290164614-172.17.0.3-1731026007826/current, will proceed with Du for space computation calculation, 2024-11-08T00:33:30,605 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/data/data3/current/BP-290164614-172.17.0.3-1731026007826/current, will proceed with Du for space computation calculation, 2024-11-08T00:33:30,636 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T00:33:30,636 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:33:30,681 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x792bd5ec9a61ff34 with lease ID 0x2805405a47bc1d7f: Processing first storage report for DS-efe03144-8230-4af2-a112-070b2af09abe from datanode DatanodeRegistration(127.0.0.1:42405, datanodeUuid=a6303643-b343-4c5a-b957-5a3adf502b48, infoPort=39845, infoSecurePort=0, ipcPort=45557, storageInfo=lv=-57;cid=testClusterID;nsid=582295921;c=1731026007826) 2024-11-08T00:33:30,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x792bd5ec9a61ff34 with lease ID 0x2805405a47bc1d7f: from storage DS-efe03144-8230-4af2-a112-070b2af09abe node DatanodeRegistration(127.0.0.1:42405, datanodeUuid=a6303643-b343-4c5a-b957-5a3adf502b48, infoPort=39845, infoSecurePort=0, ipcPort=45557, storageInfo=lv=-57;cid=testClusterID;nsid=582295921;c=1731026007826), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T00:33:30,682 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x961a650e8fd979e7 with lease ID 0x2805405a47bc1d80: Processing first storage report for DS-a73719d7-3eea-476a-8f62-bc8c88fea55b from datanode DatanodeRegistration(127.0.0.1:43801, datanodeUuid=56a7f2c7-ffed-426f-b640-73d18b454654, infoPort=35393, infoSecurePort=0, ipcPort=35131, storageInfo=lv=-57;cid=testClusterID;nsid=582295921;c=1731026007826) 2024-11-08T00:33:30,682 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x961a650e8fd979e7 with lease ID 0x2805405a47bc1d80: from storage DS-a73719d7-3eea-476a-8f62-bc8c88fea55b node DatanodeRegistration(127.0.0.1:43801, datanodeUuid=56a7f2c7-ffed-426f-b640-73d18b454654, infoPort=35393, infoSecurePort=0, ipcPort=35131, storageInfo=lv=-57;cid=testClusterID;nsid=582295921;c=1731026007826), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:33:30,683 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x792bd5ec9a61ff34 with lease ID 0x2805405a47bc1d7f: Processing first storage report for DS-4d65075f-d507-49e0-9d75-108a63370c9d from datanode DatanodeRegistration(127.0.0.1:42405, datanodeUuid=a6303643-b343-4c5a-b957-5a3adf502b48, infoPort=39845, infoSecurePort=0, ipcPort=45557, storageInfo=lv=-57;cid=testClusterID;nsid=582295921;c=1731026007826) 2024-11-08T00:33:30,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x792bd5ec9a61ff34 with lease ID 0x2805405a47bc1d7f: from storage DS-4d65075f-d507-49e0-9d75-108a63370c9d node DatanodeRegistration(127.0.0.1:42405, datanodeUuid=a6303643-b343-4c5a-b957-5a3adf502b48, infoPort=39845, infoSecurePort=0, ipcPort=45557, storageInfo=lv=-57;cid=testClusterID;nsid=582295921;c=1731026007826), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:33:30,683 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x961a650e8fd979e7 with lease ID 0x2805405a47bc1d80: Processing first storage report for DS-f7a9437e-5062-4eda-ad9d-f23bcf3db496 from datanode DatanodeRegistration(127.0.0.1:43801, datanodeUuid=56a7f2c7-ffed-426f-b640-73d18b454654, infoPort=35393, infoSecurePort=0, ipcPort=35131, storageInfo=lv=-57;cid=testClusterID;nsid=582295921;c=1731026007826) 2024-11-08T00:33:30,683 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x961a650e8fd979e7 with lease ID 0x2805405a47bc1d80: from storage DS-f7a9437e-5062-4eda-ad9d-f23bcf3db496 node DatanodeRegistration(127.0.0.1:43801, datanodeUuid=56a7f2c7-ffed-426f-b640-73d18b454654, infoPort=35393, infoSecurePort=0, ipcPort=35131, storageInfo=lv=-57;cid=testClusterID;nsid=582295921;c=1731026007826), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:33:30,714 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1 2024-11-08T00:33:30,791 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/zookeeper_0, clientPort=56452, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T00:33:30,801 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56452 2024-11-08T00:33:30,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:33:30,817 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:33:31,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:33:31,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:33:31,457 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868 with version=8 2024-11-08T00:33:31,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase-staging 2024-11-08T00:33:31,532 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-08T00:33:31,780 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:33:31,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:33:31,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:33:31,793 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:33:31,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:33:31,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:33:31,917 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T00:33:31,970 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-08T00:33:31,978 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-08T00:33:31,981 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:33:32,003 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 91132 (auto-detected) 2024-11-08T00:33:32,004 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-08T00:33:32,019 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36725 2024-11-08T00:33:32,036 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36725 connecting to ZooKeeper ensemble=127.0.0.1:56452 2024-11-08T00:33:32,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:367250x0, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:33:32,173 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36725-0x10117dcbe3d0000 connected 2024-11-08T00:33:32,278 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:33:32,280 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:33:32,288 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:33:32,291 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868, hbase.cluster.distributed=false 2024-11-08T00:33:32,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:33:32,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36725 2024-11-08T00:33:32,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36725 2024-11-08T00:33:32,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36725 2024-11-08T00:33:32,318 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36725 2024-11-08T00:33:32,318 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36725 2024-11-08T00:33:32,419 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:33:32,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:33:32,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:33:32,421 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:33:32,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:33:32,422 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:33:32,425 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:33:32,428 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:33:32,429 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37289 2024-11-08T00:33:32,432 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37289 connecting to ZooKeeper ensemble=127.0.0.1:56452 2024-11-08T00:33:32,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:33:32,440 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:33:32,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:372890x0, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:33:32,456 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:372890x0, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:33:32,456 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37289-0x10117dcbe3d0001 connected 2024-11-08T00:33:32,460 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:33:32,467 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:33:32,470 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:33:32,475 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:33:32,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37289 2024-11-08T00:33:32,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37289 2024-11-08T00:33:32,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37289 2024-11-08T00:33:32,481 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37289 2024-11-08T00:33:32,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37289 2024-11-08T00:33:32,499 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3302f0f507bd:36725 2024-11-08T00:33:32,500 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3302f0f507bd,36725,1731026011631 2024-11-08T00:33:32,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:33:32,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:33:32,518 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3302f0f507bd,36725,1731026011631 2024-11-08T00:33:32,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:32,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:33:32,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-08T00:33:32,552 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:33:32,553 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3302f0f507bd,36725,1731026011631 from backup master directory 2024-11-08T00:33:32,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3302f0f507bd,36725,1731026011631 2024-11-08T00:33:32,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:33:32,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:33:32,562 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:33:32,562 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3302f0f507bd,36725,1731026011631 2024-11-08T00:33:32,564 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-08T00:33:32,565 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-08T00:33:32,621 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase.id] with ID: 61271bdd-8286-496c-9bf5-9367753832df 2024-11-08T00:33:32,621 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/.tmp/hbase.id 2024-11-08T00:33:32,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:33:32,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:33:32,635 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/.tmp/hbase.id]:[hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase.id] 2024-11-08T00:33:32,679 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:33:32,683 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T00:33:32,702 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-08T00:33:32,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:32,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:32,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:33:32,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:33:32,754 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:33:32,755 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T00:33:32,761 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:33:32,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:33:32,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:33:32,808 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store 2024-11-08T00:33:32,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:33:32,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:33:32,831 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-08T00:33:32,835 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:33:32,836 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:33:32,836 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:33:32,837 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:33:32,838 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:33:32,838 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:33:32,839 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T00:33:32,840 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026012836Disabling compacts and flushes for region at 1731026012836Disabling writes for close at 1731026012838 (+2 ms)Writing region close event to WAL at 1731026012838Closed at 1731026012838 2024-11-08T00:33:32,842 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/.initializing 2024-11-08T00:33:32,842 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/WALs/3302f0f507bd,36725,1731026011631 2024-11-08T00:33:32,864 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C36725%2C1731026011631, suffix=, logDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/WALs/3302f0f507bd,36725,1731026011631, archiveDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/oldWALs, maxLogs=10 2024-11-08T00:33:32,873 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C36725%2C1731026011631.1731026012869 2024-11-08T00:33:32,891 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/WALs/3302f0f507bd,36725,1731026011631/3302f0f507bd%2C36725%2C1731026011631.1731026012869 2024-11-08T00:33:32,901 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39845:39845),(127.0.0.1/127.0.0.1:35393:35393)] 2024-11-08T00:33:32,902 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:33:32,903 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:33:32,906 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,907 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,962 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T00:33:32,965 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:32,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:33:32,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T00:33:32,972 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:32,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:33:32,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T00:33:32,975 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:32,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:33:32,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,979 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T00:33:32,979 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:32,980 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:33:32,980 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,983 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,984 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,990 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,990 DEBUG [master/3302f0f507bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:32,994 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:33:32,998 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:33:33,003 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:33:33,004 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714201, jitterRate=-0.09184703230857849}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:33:33,011 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731026012918Initializing all the Stores at 1731026012920 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026012920Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026012921 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026012921Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026012921Cleaning up temporary data from old regions at 1731026012991 (+70 ms)Region opened successfully at 1731026013011 (+20 ms) 2024-11-08T00:33:33,013 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T00:33:33,044 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d003dca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:33:33,069 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T00:33:33,078 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T00:33:33,078 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T00:33:33,081 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T00:33:33,082 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-08T00:33:33,086 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-11-08T00:33:33,086 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T00:33:33,108 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T00:33:33,115 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T00:33:33,161 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T00:33:33,164 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T00:33:33,167 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T00:33:33,178 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T00:33:33,180 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T00:33:33,183 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T00:33:33,192 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T00:33:33,193 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T00:33:33,203 DEBUG 
[master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T00:33:33,220 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T00:33:33,231 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T00:33:33,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:33:33,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:33:33,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:33,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:33,250 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3302f0f507bd,36725,1731026011631, sessionid=0x10117dcbe3d0000, setting cluster-up flag (Was=false) 2024-11-08T00:33:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:33,308 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T00:33:33,313 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,36725,1731026011631 2024-11-08T00:33:33,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:33,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:33,371 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T00:33:33,375 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,36725,1731026011631 2024-11-08T00:33:33,386 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T00:33:33,389 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(746): ClusterId : 61271bdd-8286-496c-9bf5-9367753832df 2024-11-08T00:33:33,392 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:33:33,405 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:33:33,405 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:33:33,415 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:33:33,415 DEBUG [RS:0;3302f0f507bd:37289 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@486aa484, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:33:33,428 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3302f0f507bd:37289 2024-11-08T00:33:33,431 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:33:33,431 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:33:33,431 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T00:33:33,434 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,36725,1731026011631 with port=37289, startcode=1731026012386 2024-11-08T00:33:33,444 DEBUG [RS:0;3302f0f507bd:37289 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:33:33,457 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T00:33:33,466 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T00:33:33,475 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
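The StochasticLoadBalancer line above reports the parameters it loaded (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). As a rough sketch of how those knobs are usually set, the snippet below writes the same values into a Configuration; the property names are the commonly documented balancer keys and are an assumption here, since the log prints only the parameter names, not the keys.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Mirrors the values the balancer reported at startup in this log.
            // Key names below are assumed, not taken from the log output.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        }
    }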
2024-11-08T00:33:33,481 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3302f0f507bd,36725,1731026011631 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T00:33:33,489 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:33:33,490 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:33:33,490 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:33:33,490 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:33:33,490 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3302f0f507bd:0, corePoolSize=10, maxPoolSize=10 2024-11-08T00:33:33,490 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,491 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:33:33,491 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,492 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731026043492 2024-11-08T00:33:33,494 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T00:33:33,495 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T00:33:33,495 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:33:33,496 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T00:33:33,500 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T00:33:33,500 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T00:33:33,501 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T00:33:33,501 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T00:33:33,502 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:33,502 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T00:33:33,503 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49465, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:33:33,502 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
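The table-descriptor dumps above enumerate per-family attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, BLOCKSIZE, and so on). For orientation, a column family with the same attributes as the logged 'info' family can be described programmatically roughly as below; the table name "demo" is hypothetical and used only for this sketch, it is not a table created by this test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilyDescriptorSketch {
        public static void main(String[] args) {
            // Same attributes the log prints for the 'info' family:
            // VERSIONS=3, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1,
            // IN_MEMORY=true, BLOCKSIZE=8192.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            // "demo" is a hypothetical table name for illustration only.
            TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();
            System.out.println(table);
        }
    }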
2024-11-08T00:33:33,506 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T00:33:33,507 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T00:33:33,508 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T00:33:33,512 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T00:33:33,512 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T00:33:33,511 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36725 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-08T00:33:33,516 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026013513,5,FailOnTimeoutGroup] 2024-11-08T00:33:33,517 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026013517,5,FailOnTimeoutGroup] 2024-11-08T00:33:33,517 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,518 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T00:33:33,519 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:33:33,519 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
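The HMaster line above notes that reopening regions with very high storeFileRefCount is disabled unless hbase.regions.recovery.store.file.ref.count is set above 0. A minimal sketch of enabling it follows; the key is quoted from the log, while the threshold 256 is an arbitrary illustrative value, not one used in this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountRecoverySketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Per the HMaster log line above, the feature stays off while this threshold is <= 0.
            // 256 is only an example threshold.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        }
    }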
2024-11-08T00:33:33,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:33:33,521 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T00:33:33,522 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868 2024-11-08T00:33:33,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:33:33,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:33:33,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:33:33,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:33:33,542 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-08T00:33:33,542 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:33:33,542 WARN [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-08T00:33:33,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:33,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:33:33,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:33:33,545 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:33:33,545 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:33,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:33:33,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:33:33,548 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction 
window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:33:33,548 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:33,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:33:33,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:33:33,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:33:33,552 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:33,553 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:33:33,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:33:33,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740 2024-11-08T00:33:33,555 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740 2024-11-08T00:33:33,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:33:33,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:33:33,559 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
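Both FlushLargeStoresPolicy messages in this log (for master:store and, just above, for hbase:meta) describe the same fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, the per-family lower bound is the region memstore flush size divided by the number of column families. A tiny arithmetic sketch using the master:store numbers visible earlier in the log (flushSize=134217728 and four families: info, proc, rs, state):

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // master:store: 128 MB flush size split across 4 families,
            // matching the flushSizeLowerBound=33554432 (32 MB) reported in this log.
            long flushSize = 134_217_728L;
            int families = 4;
            System.out.println(flushSize / families); // 33554432
        }
    }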
2024-11-08T00:33:33,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:33:33,566 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:33:33,567 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840161, jitterRate=0.06832024455070496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:33:33,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731026013537Initializing all the Stores at 1731026013538 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026013538Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026013539 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026013539Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026013539Cleaning up temporary data from old regions at 1731026013558 (+19 ms)Region opened successfully at 1731026013572 (+14 ms) 2024-11-08T00:33:33,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:33:33,573 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:33:33,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:33:33,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:33:33,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:33:33,574 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:33:33,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026013573Disabling compacts and flushes for region at 1731026013573Disabling writes for close at 1731026013573Writing 
region close event to WAL at 1731026013574 (+1 ms)Closed at 1731026013574 2024-11-08T00:33:33,578 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:33:33,578 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T00:33:33,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T00:33:33,591 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:33:33,593 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T00:33:33,644 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,36725,1731026011631 with port=37289, startcode=1731026012386 2024-11-08T00:33:33,647 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36725 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3302f0f507bd,37289,1731026012386 2024-11-08T00:33:33,651 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36725 {}] master.ServerManager(517): Registering regionserver=3302f0f507bd,37289,1731026012386 2024-11-08T00:33:33,663 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868 2024-11-08T00:33:33,663 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45125 2024-11-08T00:33:33,663 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:33:33,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:33:33,677 DEBUG [RS:0;3302f0f507bd:37289 {}] zookeeper.ZKUtil(111): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3302f0f507bd,37289,1731026012386 2024-11-08T00:33:33,677 WARN [RS:0;3302f0f507bd:37289 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T00:33:33,678 INFO [RS:0;3302f0f507bd:37289 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:33:33,678 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386 2024-11-08T00:33:33,680 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3302f0f507bd,37289,1731026012386] 2024-11-08T00:33:33,705 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:33:33,719 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:33:33,723 INFO [RS:0;3302f0f507bd:37289 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:33:33,724 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,725 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:33:33,730 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:33:33,731 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,731 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,731 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,732 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,732 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,732 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,732 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:33:33,732 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,732 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,732 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3302f0f507bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-08T00:33:33,732 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,733 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,733 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:33:33,733 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:33:33,733 DEBUG [RS:0;3302f0f507bd:37289 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:33:33,734 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,734 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,734 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,735 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,735 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,735 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,37289,1731026012386-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:33:33,744 WARN [3302f0f507bd:36725 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T00:33:33,756 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:33:33,758 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,37289,1731026012386-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,759 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:33,759 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.Replication(171): 3302f0f507bd,37289,1731026012386 started 2024-11-08T00:33:33,780 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
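Each executor.ExecutorService line above reports a corePoolSize/maxPoolSize pair for a named service. As a rough JDK-level analogy only (not HBase's own ExecutorService implementation), a bounded pool with the same shape as, say, RS_OPEN_REGION (corePoolSize=1, maxPoolSize=1) could be sketched like this:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
        public static void main(String[] args) throws InterruptedException {
            // A 1-thread bounded pool whose core thread may time out when idle,
            // loosely mirroring the corePoolSize=1, maxPoolSize=1 services above.
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            pool.allowCoreThreadTimeOut(true);
            pool.execute(() -> System.out.println("open-region task placeholder"));
            pool.shutdown();
            pool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }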
2024-11-08T00:33:33,781 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1482): Serving as 3302f0f507bd,37289,1731026012386, RpcServer on 3302f0f507bd/172.17.0.3:37289, sessionid=0x10117dcbe3d0001 2024-11-08T00:33:33,781 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:33:33,781 DEBUG [RS:0;3302f0f507bd:37289 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3302f0f507bd,37289,1731026012386 2024-11-08T00:33:33,782 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,37289,1731026012386' 2024-11-08T00:33:33,782 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:33:33,783 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:33:33,784 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:33:33,784 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:33:33,784 DEBUG [RS:0;3302f0f507bd:37289 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3302f0f507bd,37289,1731026012386 2024-11-08T00:33:33,784 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,37289,1731026012386' 2024-11-08T00:33:33,784 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:33:33,785 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:33:33,785 DEBUG [RS:0;3302f0f507bd:37289 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:33:33,785 INFO [RS:0;3302f0f507bd:37289 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T00:33:33,785 INFO [RS:0;3302f0f507bd:37289 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-08T00:33:33,897 INFO [RS:0;3302f0f507bd:37289 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C37289%2C1731026012386, suffix=, logDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386, archiveDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs, maxLogs=32 2024-11-08T00:33:33,900 INFO [RS:0;3302f0f507bd:37289 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026013899 2024-11-08T00:33:33,908 INFO [RS:0;3302f0f507bd:37289 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026013899 2024-11-08T00:33:33,909 DEBUG [RS:0;3302f0f507bd:37289 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39845:39845),(127.0.0.1/127.0.0.1:35393:35393)] 2024-11-08T00:33:33,997 DEBUG [3302f0f507bd:36725 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-08T00:33:34,013 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3302f0f507bd,37289,1731026012386 2024-11-08T00:33:34,018 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,37289,1731026012386, state=OPENING 2024-11-08T00:33:34,073 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T00:33:34,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:34,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:33:34,090 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:33:34,090 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:33:34,093 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:33:34,096 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,37289,1731026012386}] 2024-11-08T00:33:34,275 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:33:34,278 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47675, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:33:34,289 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T00:33:34,290 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:33:34,293 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C37289%2C1731026012386.meta, suffix=.meta, logDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386, archiveDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs, maxLogs=32 2024-11-08T00:33:34,295 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.meta.1731026014295.meta 2024-11-08T00:33:34,302 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.meta.1731026014295.meta 2024-11-08T00:33:34,306 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39845:39845),(127.0.0.1/127.0.0.1:35393:35393)] 2024-11-08T00:33:34,308 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:33:34,310 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T00:33:34,312 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T00:33:34,316 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-08T00:33:34,320 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T00:33:34,320 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:33:34,320 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T00:33:34,320 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T00:33:34,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:33:34,325 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:33:34,326 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:34,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:33:34,327 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:33:34,328 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:33:34,328 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:34,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:33:34,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:33:34,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:33:34,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:34,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:33:34,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:33:34,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:33:34,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:34,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-08T00:33:34,334 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:33:34,336 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740 2024-11-08T00:33:34,338 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740 2024-11-08T00:33:34,340 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:33:34,341 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:33:34,342 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-08T00:33:34,344 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:33:34,346 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715389, jitterRate=-0.09033629298210144}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:33:34,346 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T00:33:34,347 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731026014321Writing region info on filesystem at 1731026014321Initializing all the Stores at 1731026014323 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026014323Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026014324 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026014324Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026014324Cleaning up temporary data from old regions at 1731026014341 (+17 ms)Running coprocessor post-open hooks at 1731026014346 (+5 ms)Region opened successfully at 1731026014347 (+1 ms) 2024-11-08T00:33:34,353 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731026014264 2024-11-08T00:33:34,362 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T00:33:34,363 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T00:33:34,364 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,37289,1731026012386 2024-11-08T00:33:34,367 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,37289,1731026012386, state=OPEN 2024-11-08T00:33:34,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:33:34,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:33:34,404 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:33:34,404 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:33:34,405 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3302f0f507bd,37289,1731026012386 2024-11-08T00:33:34,414 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T00:33:34,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,37289,1731026012386 in 310 msec 2024-11-08T00:33:34,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T00:33:34,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 833 msec 2024-11-08T00:33:34,423 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:33:34,423 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T00:33:34,442 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:33:34,443 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,37289,1731026012386, seqNum=-1] 2024-11-08T00:33:34,460 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:33:34,462 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51613, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:33:34,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0640 sec 2024-11-08T00:33:34,481 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731026014481, completionTime=-1 2024-11-08T00:33:34,484 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-08T00:33:34,484 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T00:33:34,508 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-08T00:33:34,508 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731026074508 2024-11-08T00:33:34,508 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731026134508 2024-11-08T00:33:34,508 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 24 msec 2024-11-08T00:33:34,511 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,36725,1731026011631-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:34,511 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,36725,1731026011631-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:34,511 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,36725,1731026011631-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:34,513 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3302f0f507bd:36725, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:33:34,513 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:34,513 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T00:33:34,520 DEBUG [master/3302f0f507bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T00:33:34,543 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.981sec 2024-11-08T00:33:34,544 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T00:33:34,545 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T00:33:34,546 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T00:33:34,546 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-08T00:33:34,547 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T00:33:34,547 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,36725,1731026011631-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:33:34,548 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,36725,1731026011631-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T00:33:34,556 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T00:33:34,557 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T00:33:34,557 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,36725,1731026011631-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:33:34,598 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:33:34,601 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-08T00:33:34,601 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-08T00:33:34,605 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3302f0f507bd,36725,-1 for getting cluster id 2024-11-08T00:33:34,607 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T00:33:34,615 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '61271bdd-8286-496c-9bf5-9367753832df' 2024-11-08T00:33:34,618 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T00:33:34,618 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "61271bdd-8286-496c-9bf5-9367753832df" 2024-11-08T00:33:34,620 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51d365bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:33:34,620 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3302f0f507bd,36725,-1] 2024-11-08T00:33:34,623 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T00:33:34,625 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:33:34,626 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49314, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T00:33:34,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:33:34,630 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:33:34,661 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,37289,1731026012386, seqNum=-1] 2024-11-08T00:33:34,661 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:33:34,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49250, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:33:34,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=3302f0f507bd,36725,1731026011631 2024-11-08T00:33:34,697 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:33:34,706 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-08T00:33:34,710 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T00:33:34,715 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3302f0f507bd,36725,1731026011631 2024-11-08T00:33:34,718 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@65d48f8e 2024-11-08T00:33:34,719 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T00:33:34,722 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49320, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T00:33:34,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36725 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-08T00:33:34,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36725 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-08T00:33:34,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36725 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:33:34,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36725 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-08T00:33:34,738 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:33:34,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36725 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-08T00:33:34,741 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:34,743 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:33:34,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36725 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:33:34,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741835_1011 (size=389) 2024-11-08T00:33:34,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741835_1011 (size=389) 2024-11-08T00:33:34,779 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 40ed29b03b51d46ac60919a5e90a6b63, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868 2024-11-08T00:33:34,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741836_1012 (size=72) 2024-11-08T00:33:34,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741836_1012 (size=72) 2024-11-08T00:33:34,792 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:33:34,792 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 40ed29b03b51d46ac60919a5e90a6b63, disabling compactions & flushes 2024-11-08T00:33:34,792 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:33:34,792 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:33:34,792 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. after waiting 0 ms 2024-11-08T00:33:34,792 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:33:34,792 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:33:34,792 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 40ed29b03b51d46ac60919a5e90a6b63: Waiting for close lock at 1731026014792Disabling compacts and flushes for region at 1731026014792Disabling writes for close at 1731026014792Writing region close event to WAL at 1731026014792Closed at 1731026014792 2024-11-08T00:33:34,794 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:33:34,798 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731026014794"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026014794"}]},"ts":"1731026014794"} 2024-11-08T00:33:34,803 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-08T00:33:34,805 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:33:34,807 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026014805"}]},"ts":"1731026014805"} 2024-11-08T00:33:34,811 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-08T00:33:34,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=40ed29b03b51d46ac60919a5e90a6b63, ASSIGN}] 2024-11-08T00:33:34,816 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=40ed29b03b51d46ac60919a5e90a6b63, ASSIGN 2024-11-08T00:33:34,817 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=40ed29b03b51d46ac60919a5e90a6b63, ASSIGN; state=OFFLINE, location=3302f0f507bd,37289,1731026012386; forceNewPlan=false, retain=false 2024-11-08T00:33:34,970 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=40ed29b03b51d46ac60919a5e90a6b63, regionState=OPENING, regionLocation=3302f0f507bd,37289,1731026012386 2024-11-08T00:33:34,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=40ed29b03b51d46ac60919a5e90a6b63, ASSIGN because future has completed 2024-11-08T00:33:34,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 40ed29b03b51d46ac60919a5e90a6b63, server=3302f0f507bd,37289,1731026012386}] 2024-11-08T00:33:35,148 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 
2024-11-08T00:33:35,149 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 40ed29b03b51d46ac60919a5e90a6b63, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:33:35,149 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,149 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:33:35,149 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,150 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,152 INFO [StoreOpener-40ed29b03b51d46ac60919a5e90a6b63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,155 INFO [StoreOpener-40ed29b03b51d46ac60919a5e90a6b63-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 40ed29b03b51d46ac60919a5e90a6b63 columnFamilyName info 2024-11-08T00:33:35,155 DEBUG [StoreOpener-40ed29b03b51d46ac60919a5e90a6b63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:33:35,156 INFO [StoreOpener-40ed29b03b51d46ac60919a5e90a6b63-1 {}] regionserver.HStore(327): Store=40ed29b03b51d46ac60919a5e90a6b63/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:33:35,157 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,158 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,159 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,160 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,160 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,162 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,166 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:33:35,167 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 40ed29b03b51d46ac60919a5e90a6b63; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810141, jitterRate=0.03014756739139557}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:33:35,167 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:35,168 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 40ed29b03b51d46ac60919a5e90a6b63: Running coprocessor pre-open hook at 1731026015150Writing region info on filesystem at 1731026015150Initializing all the Stores at 1731026015152 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026015152Cleaning up temporary data from old regions at 1731026015160 (+8 ms)Running coprocessor post-open hooks at 1731026015167 (+7 ms)Region opened successfully at 1731026015168 (+1 ms) 2024-11-08T00:33:35,170 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63., pid=6, masterSystemTime=1731026015137 2024-11-08T00:33:35,174 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:33:35,174 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:33:35,175 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=40ed29b03b51d46ac60919a5e90a6b63, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,37289,1731026012386 2024-11-08T00:33:35,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 40ed29b03b51d46ac60919a5e90a6b63, server=3302f0f507bd,37289,1731026012386 because future has completed 2024-11-08T00:33:35,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T00:33:35,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 40ed29b03b51d46ac60919a5e90a6b63, server=3302f0f507bd,37289,1731026012386 in 201 msec 2024-11-08T00:33:35,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T00:33:35,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=40ed29b03b51d46ac60919a5e90a6b63, ASSIGN in 372 msec 2024-11-08T00:33:35,191 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:33:35,191 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026015191"}]},"ts":"1731026015191"} 2024-11-08T00:33:35,194 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-08T00:33:35,196 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:33:35,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 466 msec 2024-11-08T00:33:39,828 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-08T00:33:39,893 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-08T00:33:39,896 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-08T00:33:41,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-08T00:33:41,967 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-08T00:33:41,969 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-08T00:33:41,969 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-08T00:33:41,970 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:33:41,970 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-08T00:33:41,970 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-08T00:33:41,971 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-08T00:33:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36725 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:33:44,837 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-08T00:33:44,840 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-08T00:33:44,847 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-08T00:33:44,848 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 
2024-11-08T00:33:44,849 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026024849 2024-11-08T00:33:44,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:33:44,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:33:44,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:33:44,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:33:44,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:33:44,863 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026013899 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026024849 2024-11-08T00:33:44,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741833_1009 (size=451) 2024-11-08T00:33:44,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741833_1009 (size=451) 2024-11-08T00:33:44,869 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026013899 to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs/3302f0f507bd%2C37289%2C1731026012386.1731026013899 2024-11-08T00:33:44,880 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35393:35393),(127.0.0.1/127.0.0.1:39845:39845)] 2024-11-08T00:33:44,890 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63., hostname=3302f0f507bd,37289,1731026012386, seqNum=2] 2024-11-08T00:33:57,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37289 {}] regionserver.HRegion(8855): Flush requested on 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:33:57,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 40ed29b03b51d46ac60919a5e90a6b63 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:33:57,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/bfbfde9761ec4f19bf1c9ae8cc1683cf is 1080, key is row0001/info:/1731026024894/Put/seqid=0 2024-11-08T00:33:57,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741838_1014 (size=12509) 2024-11-08T00:33:57,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741838_1014 (size=12509) 2024-11-08T00:33:57,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/bfbfde9761ec4f19bf1c9ae8cc1683cf 2024-11-08T00:33:57,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/bfbfde9761ec4f19bf1c9ae8cc1683cf as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/bfbfde9761ec4f19bf1c9ae8cc1683cf 2024-11-08T00:33:57,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/bfbfde9761ec4f19bf1c9ae8cc1683cf, entries=7, sequenceid=11, filesize=12.2 K 2024-11-08T00:33:57,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 40ed29b03b51d46ac60919a5e90a6b63 in 136ms, sequenceid=11, compaction requested=false 2024-11-08T00:33:57,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 40ed29b03b51d46ac60919a5e90a6b63: 2024-11-08T00:34:00,710 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-08T00:34:03,145 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 118 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:05,148 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026045147 2024-11-08T00:34:05,355 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:05,355 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:05,356 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:05,356 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:05,356 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:05,356 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:05,356 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026024849 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026045147 2024-11-08T00:34:05,357 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:35393:35393),(127.0.0.1/127.0.0.1:39845:39845)] 2024-11-08T00:34:05,357 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026024849 is not closed yet, will try archiving it next time 2024-11-08T00:34:05,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741837_1013 (size=12399) 2024-11-08T00:34:05,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741837_1013 (size=12399) 2024-11-08T00:34:05,560 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:07,767 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:09,973 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:12,178 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:12,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37289 {}] regionserver.HRegion(8855): Flush requested on 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:34:12,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 40ed29b03b51d46ac60919a5e90a6b63 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:34:12,382 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:12,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/fbbbca4f056c445cb415bc695ced9f6f is 1080, key is row0008/info:/1731026039016/Put/seqid=0 2024-11-08T00:34:12,400 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741840_1016 (size=12509) 2024-11-08T00:34:12,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741840_1016 (size=12509) 2024-11-08T00:34:12,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/fbbbca4f056c445cb415bc695ced9f6f 2024-11-08T00:34:12,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/fbbbca4f056c445cb415bc695ced9f6f as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/fbbbca4f056c445cb415bc695ced9f6f 2024-11-08T00:34:12,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/fbbbca4f056c445cb415bc695ced9f6f, entries=7, sequenceid=21, filesize=12.2 K 2024-11-08T00:34:12,624 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:12,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 40ed29b03b51d46ac60919a5e90a6b63 in 446ms, sequenceid=21, compaction requested=false 2024-11-08T00:34:12,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 40ed29b03b51d46ac60919a5e90a6b63: 2024-11-08T00:34:12,625 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-08T00:34:12,625 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:34:12,626 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/bfbfde9761ec4f19bf1c9ae8cc1683cf because midkey is the same as first or last row 2024-11-08T00:34:14,385 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:14,600 INFO [master/3302f0f507bd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate 
StoreFileTracker! 2024-11-08T00:34:14,600 INFO [master/3302f0f507bd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-08T00:34:16,590 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:16,595 WARN [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:16,596 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C37289%2C1731026012386:(num 1731026045147) roll requested 2024-11-08T00:34:16,597 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026056597 2024-11-08T00:34:16,804 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:16,805 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:16,805 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:16,805 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:16,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:16,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:16,806 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026045147 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026056597 2024-11-08T00:34:16,807 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35393:35393),(127.0.0.1/127.0.0.1:39845:39845)] 2024-11-08T00:34:16,807 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026045147 is not closed yet, will try archiving it next time 2024-11-08T00:34:16,807 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026024849 to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs/3302f0f507bd%2C37289%2C1731026012386.1731026024849 2024-11-08T00:34:16,812 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741839_1015 (size=7739) 2024-11-08T00:34:16,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741839_1015 (size=7739) 2024-11-08T00:34:18,797 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:20,149 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 40ed29b03b51d46ac60919a5e90a6b63, had cached 0 bytes from a total of 25018 2024-11-08T00:34:21,001 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:23,205 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:25,413 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:27,416 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:34:27,416 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026067416 2024-11-08T00:34:30,711 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-08T00:34:32,426 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:32,428 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:32,429 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C37289%2C1731026012386:(num 1731026067416) roll requested 2024-11-08T00:34:32,429 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:32,429 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:32,429 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:32,429 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:32,429 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:32,430 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026056597 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026067416 2024-11-08T00:34:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741841_1017 (size=4753) 2024-11-08T00:34:32,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741841_1017 (size=4753) 2024-11-08T00:34:32,453 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35393:35393),(127.0.0.1/127.0.0.1:39845:39845)] 2024-11-08T00:34:32,453 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026056597 is not closed yet, will try archiving it next time 2024-11-08T00:34:32,453 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026072453 2024-11-08T00:34:37,459 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:37,459 WARN [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:37,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37289 {}] regionserver.HRegion(8855): Flush requested on 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:34:37,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 40ed29b03b51d46ac60919a5e90a6b63 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:34:37,463 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:37,463 WARN [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:39,460 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:34:42,461 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:42,461 WARN [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK], DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK]] 2024-11-08T00:34:42,462 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:42,462 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:42,462 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:42,462 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:42,462 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:42,463 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026067416 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026072453 2024-11-08T00:34:42,464 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39845:39845),(127.0.0.1/127.0.0.1:35393:35393)] 2024-11-08T00:34:42,464 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026067416 is not closed yet, will try archiving it next time 2024-11-08T00:34:42,464 
DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C37289%2C1731026012386:(num 1731026072453) roll requested 2024-11-08T00:34:42,465 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026082464 2024-11-08T00:34:42,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741842_1018 (size=1569) 2024-11-08T00:34:42,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741842_1018 (size=1569) 2024-11-08T00:34:42,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/5f9d1280ae6c47da918df6f936a0d255 is 1080, key is row0015/info:/1731026054181/Put/seqid=0 2024-11-08T00:34:42,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741844_1020 (size=12509) 2024-11-08T00:34:42,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741844_1020 (size=12509) 2024-11-08T00:34:42,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/5f9d1280ae6c47da918df6f936a0d255 2024-11-08T00:34:42,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/5f9d1280ae6c47da918df6f936a0d255 as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/5f9d1280ae6c47da918df6f936a0d255 2024-11-08T00:34:42,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/5f9d1280ae6c47da918df6f936a0d255, entries=7, sequenceid=31, filesize=12.2 K 2024-11-08T00:34:47,475 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK], DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK]] 2024-11-08T00:34:47,475 WARN [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK], DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK]] 2024-11-08T00:34:47,508 INFO [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK], DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK]] 2024-11-08T00:34:47,508 WARN [FSHLog-0-hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868-prefix:3302f0f507bd,37289,1731026012386 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42405,DS-efe03144-8230-4af2-a112-070b2af09abe,DISK], DatanodeInfoWithStorage[127.0.0.1:43801,DS-a73719d7-3eea-476a-8f62-bc8c88fea55b,DISK]] 2024-11-08T00:34:47,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 40ed29b03b51d46ac60919a5e90a6b63 in 10049ms, sequenceid=31, compaction requested=true 2024-11-08T00:34:47,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 40ed29b03b51d46ac60919a5e90a6b63: 2024-11-08T00:34:47,508 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,508 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,508 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-08T00:34:47,508 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,508 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:34:47,508 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/bfbfde9761ec4f19bf1c9ae8cc1683cf because midkey is the same as first or last row 2024-11-08T00:34:47,509 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,509 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,509 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026072453 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026082464 2024-11-08T00:34:47,510 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35393:35393),(127.0.0.1/127.0.0.1:39845:39845)] 2024-11-08T00:34:47,510 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026072453 is not closed yet, will try archiving it next time 2024-11-08T00:34:47,510 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026045147 to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs/3302f0f507bd%2C37289%2C1731026012386.1731026045147 
2024-11-08T00:34:47,510 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 40ed29b03b51d46ac60919a5e90a6b63:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:34:47,510 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C37289%2C1731026012386:(num 1731026082464) roll requested 2024-11-08T00:34:47,511 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026087511 2024-11-08T00:34:47,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741843_1019 (size=438) 2024-11-08T00:34:47,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741843_1019 (size=438) 2024-11-08T00:34:47,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:34:47,514 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:34:47,514 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026056597 to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs/3302f0f507bd%2C37289%2C1731026012386.1731026056597 2024-11-08T00:34:47,516 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026067416 to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs/3302f0f507bd%2C37289%2C1731026012386.1731026067416 2024-11-08T00:34:47,518 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026072453 to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs/3302f0f507bd%2C37289%2C1731026012386.1731026072453 2024-11-08T00:34:47,519 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:34:47,520 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,521 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,521 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.HStore(1541): 40ed29b03b51d46ac60919a5e90a6b63/info is initiating minor compaction (all files) 2024-11-08T00:34:47,521 INFO [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 40ed29b03b51d46ac60919a5e90a6b63/info in TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 
2024-11-08T00:34:47,522 INFO [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/bfbfde9761ec4f19bf1c9ae8cc1683cf, hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/fbbbca4f056c445cb415bc695ced9f6f, hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/5f9d1280ae6c47da918df6f936a0d255] into tmpdir=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp, totalSize=36.6 K 2024-11-08T00:34:47,522 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,522 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,522 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,523 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026082464 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026087511 2024-11-08T00:34:47,523 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] compactions.Compactor(225): Compacting bfbfde9761ec4f19bf1c9ae8cc1683cf, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731026024894 2024-11-08T00:34:47,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741845_1021 (size=93) 2024-11-08T00:34:47,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741845_1021 (size=93) 2024-11-08T00:34:47,525 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] compactions.Compactor(225): Compacting fbbbca4f056c445cb415bc695ced9f6f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731026039016 2024-11-08T00:34:47,526 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026082464 to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs/3302f0f507bd%2C37289%2C1731026012386.1731026082464 2024-11-08T00:34:47,526 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f9d1280ae6c47da918df6f936a0d255, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731026054181 2024-11-08T00:34:47,529 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39845:39845),(127.0.0.1/127.0.0.1:35393:35393)] 2024-11-08T00:34:47,529 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37289%2C1731026012386.1731026087529 2024-11-08T00:34:47,538 INFO 
[sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,539 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,539 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,539 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,539 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:34:47,539 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026087511 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026087529 2024-11-08T00:34:47,540 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35393:35393),(127.0.0.1/127.0.0.1:39845:39845)] 2024-11-08T00:34:47,540 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/WALs/3302f0f507bd,37289,1731026012386/3302f0f507bd%2C37289%2C1731026012386.1731026087511 is not closed yet, will try archiving it next time 2024-11-08T00:34:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741846_1022 (size=1258) 2024-11-08T00:34:47,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741846_1022 (size=1258) 2024-11-08T00:34:47,561 INFO [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 40ed29b03b51d46ac60919a5e90a6b63#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:34:47,562 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/dcbce559d9f842cb913e3bf1cd851a9a is 1080, key is row0001/info:/1731026024894/Put/seqid=0 2024-11-08T00:34:47,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741848_1024 (size=27710) 2024-11-08T00:34:47,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741848_1024 (size=27710) 2024-11-08T00:34:47,581 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/dcbce559d9f842cb913e3bf1cd851a9a as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/dcbce559d9f842cb913e3bf1cd851a9a 2024-11-08T00:34:47,600 INFO [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 40ed29b03b51d46ac60919a5e90a6b63/info of 40ed29b03b51d46ac60919a5e90a6b63 into dcbce559d9f842cb913e3bf1cd851a9a(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-08T00:34:47,600 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 40ed29b03b51d46ac60919a5e90a6b63: 2024-11-08T00:34:47,602 INFO [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63., storeName=40ed29b03b51d46ac60919a5e90a6b63/info, priority=13, startTime=1731026087510; duration=0sec 2024-11-08T00:34:47,603 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-08T00:34:47,603 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:34:47,603 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/dcbce559d9f842cb913e3bf1cd851a9a because midkey is the same as first or last row 2024-11-08T00:34:47,603 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-08T00:34:47,603 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:34:47,603 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/dcbce559d9f842cb913e3bf1cd851a9a because midkey is the same as first or last row 2024-11-08T00:34:47,604 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-08T00:34:47,604 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:34:47,604 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/dcbce559d9f842cb913e3bf1cd851a9a because midkey is the same as first or last row 2024-11-08T00:34:47,604 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:34:47,604 DEBUG [RS:0;3302f0f507bd:37289-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 40ed29b03b51d46ac60919a5e90a6b63:info 2024-11-08T00:34:59,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37289 {}] regionserver.HRegion(8855): Flush requested on 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:34:59,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 40ed29b03b51d46ac60919a5e90a6b63 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:34:59,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/38fa2d5a6764419083254f11aa0da553 is 1080, key is row0022/info:/1731026087530/Put/seqid=0 2024-11-08T00:34:59,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741849_1025 (size=12509) 2024-11-08T00:34:59,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741849_1025 (size=12509) 2024-11-08T00:34:59,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/38fa2d5a6764419083254f11aa0da553 2024-11-08T00:34:59,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/38fa2d5a6764419083254f11aa0da553 as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/38fa2d5a6764419083254f11aa0da553 2024-11-08T00:34:59,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/38fa2d5a6764419083254f11aa0da553, entries=7, sequenceid=42, filesize=12.2 K 2024-11-08T00:34:59,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 40ed29b03b51d46ac60919a5e90a6b63 in 39ms, sequenceid=42, compaction requested=false 2024-11-08T00:34:59,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 40ed29b03b51d46ac60919a5e90a6b63: 2024-11-08T00:34:59,595 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-08T00:34:59,595 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:34:59,595 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/dcbce559d9f842cb913e3bf1cd851a9a because midkey is the same as first or last row 2024-11-08T00:35:00,711 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-08T00:35:05,150 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 40ed29b03b51d46ac60919a5e90a6b63, had cached 0 bytes from a total of 40219 2024-11-08T00:35:07,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T00:35:07,605 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-08T00:35:07,605 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:07,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:07,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:07,613 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-08T00:35:07,614 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T00:35:07,614 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=245640440, stopped=false 2024-11-08T00:35:07,614 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3302f0f507bd,36725,1731026011631 2024-11-08T00:35:07,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:07,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:07,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:07,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:07,669 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:35:07,670 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T00:35:07,670 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:07,670 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:07,670 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:07,670 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:07,671 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3302f0f507bd,37289,1731026012386' ***** 2024-11-08T00:35:07,671 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:35:07,671 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:35:07,672 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:35:07,672 INFO [RS:0;3302f0f507bd:37289 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:35:07,672 INFO [RS:0;3302f0f507bd:37289 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:35:07,672 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(3091): Received CLOSE for 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:35:07,673 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(959): stopping server 3302f0f507bd,37289,1731026012386 2024-11-08T00:35:07,673 INFO [RS:0;3302f0f507bd:37289 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:35:07,673 INFO [RS:0;3302f0f507bd:37289 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3302f0f507bd:37289. 
2024-11-08T00:35:07,673 DEBUG [RS:0;3302f0f507bd:37289 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:07,673 DEBUG [RS:0;3302f0f507bd:37289 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:07,673 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 40ed29b03b51d46ac60919a5e90a6b63, disabling compactions & flushes 2024-11-08T00:35:07,674 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:35:07,674 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T00:35:07,674 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:35:07,674 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:35:07,674 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T00:35:07,674 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. after waiting 0 ms 2024-11-08T00:35:07,674 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T00:35:07,674 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 
2024-11-08T00:35:07,674 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 40ed29b03b51d46ac60919a5e90a6b63 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-08T00:35:07,674 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-08T00:35:07,675 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:35:07,675 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 40ed29b03b51d46ac60919a5e90a6b63=TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.} 2024-11-08T00:35:07,675 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:35:07,675 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:35:07,675 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:35:07,675 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:35:07,675 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-08T00:35:07,675 DEBUG [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 40ed29b03b51d46ac60919a5e90a6b63 2024-11-08T00:35:07,681 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/1771b90bc403466b8c2297112672e154 is 1080, key is row0029/info:/1731026101558/Put/seqid=0 2024-11-08T00:35:07,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741850_1026 (size=8193) 2024-11-08T00:35:07,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741850_1026 (size=8193) 2024-11-08T00:35:07,695 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/1771b90bc403466b8c2297112672e154 2024-11-08T00:35:07,702 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/info/222a80681561436fad2cecdca57a376c is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63./info:regioninfo/1731026015175/Put/seqid=0 2024-11-08T00:35:07,705 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/.tmp/info/1771b90bc403466b8c2297112672e154 as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/1771b90bc403466b8c2297112672e154 2024-11-08T00:35:07,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741851_1027 (size=7016) 2024-11-08T00:35:07,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741851_1027 (size=7016) 2024-11-08T00:35:07,709 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/info/222a80681561436fad2cecdca57a376c 2024-11-08T00:35:07,715 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/1771b90bc403466b8c2297112672e154, entries=3, sequenceid=48, filesize=8.0 K 2024-11-08T00:35:07,717 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 40ed29b03b51d46ac60919a5e90a6b63 in 42ms, sequenceid=48, compaction requested=true 2024-11-08T00:35:07,718 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/bfbfde9761ec4f19bf1c9ae8cc1683cf, hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/fbbbca4f056c445cb415bc695ced9f6f, hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/5f9d1280ae6c47da918df6f936a0d255] to archive 2024-11-08T00:35:07,721 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-08T00:35:07,724 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/bfbfde9761ec4f19bf1c9ae8cc1683cf to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/archive/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/bfbfde9761ec4f19bf1c9ae8cc1683cf 2024-11-08T00:35:07,726 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/fbbbca4f056c445cb415bc695ced9f6f to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/archive/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/fbbbca4f056c445cb415bc695ced9f6f 2024-11-08T00:35:07,728 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/5f9d1280ae6c47da918df6f936a0d255 to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/archive/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/info/5f9d1280ae6c47da918df6f936a0d255 2024-11-08T00:35:07,735 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-08T00:35:07,735 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-08T00:35:07,736 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/ns/8de13103a9b2418bb716bd42a6f08ea7 is 43, key is default/ns:d/1731026014466/Put/seqid=0 2024-11-08T00:35:07,740 INFO [regionserver/3302f0f507bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:35:07,742 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3302f0f507bd:36725 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-08T00:35:07,743 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [bfbfde9761ec4f19bf1c9ae8cc1683cf=12509, fbbbca4f056c445cb415bc695ced9f6f=12509, 5f9d1280ae6c47da918df6f936a0d255=12509] 2024-11-08T00:35:07,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741852_1028 (size=5153) 2024-11-08T00:35:07,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741852_1028 (size=5153) 2024-11-08T00:35:07,746 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/ns/8de13103a9b2418bb716bd42a6f08ea7 2024-11-08T00:35:07,749 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/default/TestLogRolling-testSlowSyncLogRolling/40ed29b03b51d46ac60919a5e90a6b63/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-08T00:35:07,752 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 2024-11-08T00:35:07,752 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 40ed29b03b51d46ac60919a5e90a6b63: Waiting for close lock at 1731026107673Running coprocessor pre-close hooks at 1731026107673Disabling compacts and flushes for region at 1731026107673Disabling writes for close at 1731026107674 (+1 ms)Obtaining lock to block concurrent updates at 1731026107674Preparing flush snapshotting stores in 40ed29b03b51d46ac60919a5e90a6b63 at 1731026107674Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731026107675 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. at 1731026107676 (+1 ms)Flushing 40ed29b03b51d46ac60919a5e90a6b63/info: creating writer at 1731026107676Flushing 40ed29b03b51d46ac60919a5e90a6b63/info: appending metadata at 1731026107680 (+4 ms)Flushing 40ed29b03b51d46ac60919a5e90a6b63/info: closing flushed file at 1731026107680Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25421268: reopening flushed file at 1731026107704 (+24 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 40ed29b03b51d46ac60919a5e90a6b63 in 42ms, sequenceid=48, compaction requested=true at 1731026107717 (+13 ms)Writing region close event to WAL at 1731026107744 (+27 ms)Running coprocessor post-close hooks at 1731026107750 (+6 ms)Closed at 1731026107752 (+2 ms) 2024-11-08T00:35:07,753 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731026014724.40ed29b03b51d46ac60919a5e90a6b63. 
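The region close journal above records the flush that runs implicitly as part of closing the region: snapshot the memstore, write and commit the HFile, then write the close event to the WAL. For comparison, the same kind of flush can be requested explicitly through the public client API; this is a hedged sketch, not part of the test code, and it assumes a reachable cluster configured via the default HBase client configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the servers hosting the table to flush their memstores; the close
      // path journaled above performs the equivalent flush automatically.
      admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
    }
  }
}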
2024-11-08T00:35:07,772 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/table/9e59d3fc4cda42629874ec865d14445c is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731026015191/Put/seqid=0 2024-11-08T00:35:07,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741853_1029 (size=5396) 2024-11-08T00:35:07,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741853_1029 (size=5396) 2024-11-08T00:35:07,779 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/table/9e59d3fc4cda42629874ec865d14445c 2024-11-08T00:35:07,788 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/info/222a80681561436fad2cecdca57a376c as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/info/222a80681561436fad2cecdca57a376c 2024-11-08T00:35:07,797 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/info/222a80681561436fad2cecdca57a376c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-08T00:35:07,799 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/ns/8de13103a9b2418bb716bd42a6f08ea7 as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/ns/8de13103a9b2418bb716bd42a6f08ea7 2024-11-08T00:35:07,808 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/ns/8de13103a9b2418bb716bd42a6f08ea7, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T00:35:07,809 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/.tmp/table/9e59d3fc4cda42629874ec865d14445c as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/table/9e59d3fc4cda42629874ec865d14445c 2024-11-08T00:35:07,817 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/table/9e59d3fc4cda42629874ec865d14445c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-08T00:35:07,819 INFO 
[RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 143ms, sequenceid=11, compaction requested=false 2024-11-08T00:35:07,825 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T00:35:07,826 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:35:07,826 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:35:07,826 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026107674Running coprocessor pre-close hooks at 1731026107675 (+1 ms)Disabling compacts and flushes for region at 1731026107675Disabling writes for close at 1731026107675Obtaining lock to block concurrent updates at 1731026107675Preparing flush snapshotting stores in 1588230740 at 1731026107675Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731026107676 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731026107677 (+1 ms)Flushing 1588230740/info: creating writer at 1731026107677Flushing 1588230740/info: appending metadata at 1731026107702 (+25 ms)Flushing 1588230740/info: closing flushed file at 1731026107702Flushing 1588230740/ns: creating writer at 1731026107718 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731026107736 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731026107736Flushing 1588230740/table: creating writer at 1731026107754 (+18 ms)Flushing 1588230740/table: appending metadata at 1731026107771 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731026107771Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@368bb792: reopening flushed file at 1731026107787 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f50c37f: reopening flushed file at 1731026107797 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a0464f9: reopening flushed file at 1731026107808 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 143ms, sequenceid=11, compaction requested=false at 1731026107819 (+11 ms)Writing region close event to WAL at 1731026107820 (+1 ms)Running coprocessor post-close hooks at 1731026107825 (+5 ms)Closed at 1731026107826 (+1 ms) 2024-11-08T00:35:07,826 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T00:35:07,876 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(976): stopping server 3302f0f507bd,37289,1731026012386; all regions closed. 
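Each "Committing ... as ..." line above, for both the test region and hbase:meta, is the flush commit step: the flushed HFile is written under the region's .tmp directory and then renamed into the column family directory before the store starts serving it. Purely to illustrate that rename-into-place step, a sketch using the Hadoop FileSystem API; the configuration, paths, and file names below are placeholders, not taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFileSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder filesystem and paths; the run above writes to
    // hdfs://localhost:45125 under the jenkins test-data directory.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmpHFile = new Path("/data/default/SomeTable/region/.tmp/info/flushed-hfile");
    Path storeHFile = new Path("/data/default/SomeTable/region/info/flushed-hfile");
    // The commit is a rename out of .tmp into the store directory, so readers
    // of the column family directory only ever see complete files.
    if (!fs.rename(tmpHFile, storeHFile)) {
      throw new java.io.IOException("rename failed: " + tmpHFile + " -> " + storeHFile);
    }
  }
}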
2024-11-08T00:35:07,878 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,878 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,878 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,879 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,879 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741834_1010 (size=3066) 2024-11-08T00:35:07,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741834_1010 (size=3066) 2024-11-08T00:35:07,888 DEBUG [RS:0;3302f0f507bd:37289 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs 2024-11-08T00:35:07,888 INFO [RS:0;3302f0f507bd:37289 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C37289%2C1731026012386.meta:.meta(num 1731026014295) 2024-11-08T00:35:07,889 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,889 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,889 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,889 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,889 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:07,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741847_1023 (size=12695) 2024-11-08T00:35:07,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741847_1023 (size=12695) 2024-11-08T00:35:07,895 DEBUG [RS:0;3302f0f507bd:37289 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/oldWALs 2024-11-08T00:35:07,896 INFO [RS:0;3302f0f507bd:37289 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C37289%2C1731026012386:(num 1731026087529) 2024-11-08T00:35:07,896 DEBUG [RS:0;3302f0f507bd:37289 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:07,896 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:35:07,896 INFO [RS:0;3302f0f507bd:37289 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:35:07,896 INFO [RS:0;3302f0f507bd:37289 {}] hbase.ChoreService(370): Chore service for: regionserver/3302f0f507bd:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-08T00:35:07,896 INFO [RS:0;3302f0f507bd:37289 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:35:07,896 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T00:35:07,897 INFO [RS:0;3302f0f507bd:37289 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37289 2024-11-08T00:35:07,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3302f0f507bd,37289,1731026012386 2024-11-08T00:35:07,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:35:07,911 INFO [RS:0;3302f0f507bd:37289 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:35:07,912 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3302f0f507bd,37289,1731026012386] 2024-11-08T00:35:07,932 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3302f0f507bd,37289,1731026012386 already deleted, retry=false 2024-11-08T00:35:07,932 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3302f0f507bd,37289,1731026012386 expired; onlineServers=0 2024-11-08T00:35:07,932 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3302f0f507bd,36725,1731026011631' ***** 2024-11-08T00:35:07,933 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T00:35:07,933 INFO [M:0;3302f0f507bd:36725 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:35:07,933 INFO [M:0;3302f0f507bd:36725 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:35:07,933 DEBUG [M:0;3302f0f507bd:36725 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T00:35:07,933 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
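The ZooKeeper traffic above is how the shutdown propagates between processes: the region server's ephemeral node under /hbase/rs disappears when its session closes, the master's RegionServerTracker sees the NodeDeleted/NodeChildrenChanged events and marks the server expired, and earlier the deletion of /hbase/running told every watcher that cluster shutdown was requested. A bare-bones watcher for that kind of event, using the plain ZooKeeper client API rather than HBase's ZKWatcher wrapper; the quorum address and session timeout below are placeholders:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatcherSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder quorum address and timeout; the test above connects to 127.0.0.1:56452.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        // Deletion of /hbase/running is the cluster-shutdown signal the
        // ZKWatcher lines above react to.
        System.out.println("cluster shutdown requested");
      }
    });
    // exists() with watch=true arms a one-shot watch even when the znode is
    // absent, matching "Set watcher on znode that does not yet exist" above.
    zk.exists("/hbase/running", true);
    Thread.sleep(60_000); // keep the session alive long enough to observe the event
    zk.close();
  }
}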
2024-11-08T00:35:07,933 DEBUG [M:0;3302f0f507bd:36725 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T00:35:07,933 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026013517 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026013517,5,FailOnTimeoutGroup] 2024-11-08T00:35:07,933 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026013513 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026013513,5,FailOnTimeoutGroup] 2024-11-08T00:35:07,934 INFO [M:0;3302f0f507bd:36725 {}] hbase.ChoreService(370): Chore service for: master/3302f0f507bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T00:35:07,934 INFO [M:0;3302f0f507bd:36725 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:35:07,934 DEBUG [M:0;3302f0f507bd:36725 {}] master.HMaster(1795): Stopping service threads 2024-11-08T00:35:07,934 INFO [M:0;3302f0f507bd:36725 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T00:35:07,934 INFO [M:0;3302f0f507bd:36725 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:35:07,935 INFO [M:0;3302f0f507bd:36725 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T00:35:07,935 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T00:35:07,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T00:35:07,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:07,943 DEBUG [M:0;3302f0f507bd:36725 {}] zookeeper.ZKUtil(347): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T00:35:07,943 WARN [M:0;3302f0f507bd:36725 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T00:35:07,944 INFO [M:0;3302f0f507bd:36725 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/.lastflushedseqids 2024-11-08T00:35:07,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741854_1030 (size=130) 2024-11-08T00:35:07,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741854_1030 (size=130) 2024-11-08T00:35:07,960 INFO [M:0;3302f0f507bd:36725 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T00:35:07,960 INFO [M:0;3302f0f507bd:36725 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T00:35:07,960 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:35:07,961 INFO [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:07,961 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:07,961 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:35:07,961 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:07,961 INFO [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-08T00:35:07,979 DEBUG [M:0;3302f0f507bd:36725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8a108c1184be4a0cbf00066202870186 is 82, key is hbase:meta,,1/info:regioninfo/1731026014364/Put/seqid=0 2024-11-08T00:35:07,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741855_1031 (size=5672) 2024-11-08T00:35:07,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741855_1031 (size=5672) 2024-11-08T00:35:07,986 INFO [M:0;3302f0f507bd:36725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8a108c1184be4a0cbf00066202870186 2024-11-08T00:35:08,010 DEBUG [M:0;3302f0f507bd:36725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f1f07eb258f469b93d3e1f69e2b44f5 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731026015198/Put/seqid=0 2024-11-08T00:35:08,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741856_1032 (size=6248) 2024-11-08T00:35:08,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741856_1032 (size=6248) 2024-11-08T00:35:08,017 INFO [M:0;3302f0f507bd:36725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f1f07eb258f469b93d3e1f69e2b44f5 2024-11-08T00:35:08,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:35:08,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37289-0x10117dcbe3d0001, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-08T00:35:08,023 INFO [RS:0;3302f0f507bd:37289 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:35:08,023 INFO [RS:0;3302f0f507bd:37289 {}] regionserver.HRegionServer(1031): Exiting; stopping=3302f0f507bd,37289,1731026012386; zookeeper connection closed. 2024-11-08T00:35:08,023 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e4734ee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e4734ee 2024-11-08T00:35:08,024 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-08T00:35:08,025 INFO [M:0;3302f0f507bd:36725 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7f1f07eb258f469b93d3e1f69e2b44f5 2024-11-08T00:35:08,044 DEBUG [M:0;3302f0f507bd:36725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/03d27f319af846f3a7e925a83a61b3ab is 69, key is 3302f0f507bd,37289,1731026012386/rs:state/1731026013655/Put/seqid=0 2024-11-08T00:35:08,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741857_1033 (size=5156) 2024-11-08T00:35:08,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741857_1033 (size=5156) 2024-11-08T00:35:08,052 INFO [M:0;3302f0f507bd:36725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/03d27f319af846f3a7e925a83a61b3ab 2024-11-08T00:35:08,076 DEBUG [M:0;3302f0f507bd:36725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33d48c9f2328415288f8bd76ab9fb197 is 52, key is load_balancer_on/state:d/1731026014703/Put/seqid=0 2024-11-08T00:35:08,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741858_1034 (size=5056) 2024-11-08T00:35:08,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741858_1034 (size=5056) 2024-11-08T00:35:08,082 INFO [M:0;3302f0f507bd:36725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33d48c9f2328415288f8bd76ab9fb197 2024-11-08T00:35:08,091 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8a108c1184be4a0cbf00066202870186 as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8a108c1184be4a0cbf00066202870186 2024-11-08T00:35:08,099 INFO [M:0;3302f0f507bd:36725 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8a108c1184be4a0cbf00066202870186, entries=8, sequenceid=59, filesize=5.5 K 2024-11-08T00:35:08,101 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7f1f07eb258f469b93d3e1f69e2b44f5 as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7f1f07eb258f469b93d3e1f69e2b44f5 2024-11-08T00:35:08,109 INFO [M:0;3302f0f507bd:36725 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7f1f07eb258f469b93d3e1f69e2b44f5 2024-11-08T00:35:08,109 INFO [M:0;3302f0f507bd:36725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7f1f07eb258f469b93d3e1f69e2b44f5, entries=6, sequenceid=59, filesize=6.1 K 2024-11-08T00:35:08,110 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/03d27f319af846f3a7e925a83a61b3ab as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/03d27f319af846f3a7e925a83a61b3ab 2024-11-08T00:35:08,118 INFO [M:0;3302f0f507bd:36725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/03d27f319af846f3a7e925a83a61b3ab, entries=1, sequenceid=59, filesize=5.0 K 2024-11-08T00:35:08,119 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/33d48c9f2328415288f8bd76ab9fb197 as hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/33d48c9f2328415288f8bd76ab9fb197 2024-11-08T00:35:08,126 INFO [M:0;3302f0f507bd:36725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/33d48c9f2328415288f8bd76ab9fb197, entries=1, sequenceid=59, filesize=4.9 K 2024-11-08T00:35:08,128 INFO [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=59, compaction requested=false 2024-11-08T00:35:08,129 INFO [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T00:35:08,129 DEBUG [M:0;3302f0f507bd:36725 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026107960Disabling compacts and flushes for region at 1731026107960Disabling writes for close at 1731026107961 (+1 ms)Obtaining lock to block concurrent updates at 1731026107961Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731026107961Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731026107962 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731026107962Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731026107963 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731026107979 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731026107979Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731026107992 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731026108009 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731026108009Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731026108026 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731026108044 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731026108044Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731026108059 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731026108075 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731026108075Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76ebf01d: reopening flushed file at 1731026108090 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@341a37c9: reopening flushed file at 1731026108099 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c3f8f83: reopening flushed file at 1731026108109 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c39d4b0: reopening flushed file at 1731026108118 (+9 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=59, compaction requested=false at 1731026108128 (+10 ms)Writing region close event to WAL at 1731026108129 (+1 ms)Closed at 1731026108129 2024-11-08T00:35:08,130 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:08,130 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:08,130 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:08,130 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:08,131 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:08,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43801 is added to blk_1073741830_1006 (size=27985) 2024-11-08T00:35:08,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741830_1006 (size=27985) 2024-11-08T00:35:08,134 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T00:35:08,134 INFO [M:0;3302f0f507bd:36725 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-08T00:35:08,134 INFO [M:0;3302f0f507bd:36725 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36725 2024-11-08T00:35:08,134 INFO [M:0;3302f0f507bd:36725 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:35:08,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:35:08,243 INFO [M:0;3302f0f507bd:36725 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:35:08,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36725-0x10117dcbe3d0000, quorum=127.0.0.1:56452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:35:08,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:08,254 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:08,254 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:08,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:08,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:08,258 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:35:08,258 WARN [BP-290164614-172.17.0.3-1731026007826 heartbeating to localhost/127.0.0.1:45125 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:35:08,258 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:35:08,258 WARN [BP-290164614-172.17.0.3-1731026007826 heartbeating to localhost/127.0.0.1:45125 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-290164614-172.17.0.3-1731026007826 (Datanode Uuid 56a7f2c7-ffed-426f-b640-73d18b454654) service to localhost/127.0.0.1:45125 2024-11-08T00:35:08,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/data/data3/current/BP-290164614-172.17.0.3-1731026007826 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:08,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/data/data4/current/BP-290164614-172.17.0.3-1731026007826 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:08,261 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:35:08,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:08,269 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:08,269 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:08,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:08,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:08,271 WARN [BP-290164614-172.17.0.3-1731026007826 heartbeating to localhost/127.0.0.1:45125 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:35:08,271 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:35:08,271 WARN [BP-290164614-172.17.0.3-1731026007826 heartbeating to localhost/127.0.0.1:45125 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-290164614-172.17.0.3-1731026007826 (Datanode Uuid a6303643-b343-4c5a-b957-5a3adf502b48) service to localhost/127.0.0.1:45125 2024-11-08T00:35:08,271 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:35:08,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/data/data1/current/BP-290164614-172.17.0.3-1731026007826 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:08,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/cluster_f213e044-1888-1e6c-6192-3fb82960480e/data/data2/current/BP-290164614-172.17.0.3-1731026007826 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:08,273 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:35:08,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:35:08,284 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:08,284 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:08,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:08,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:08,292 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T00:35:08,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T00:35:08,335 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=81 (was 12) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45125 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45125 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45125 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1ab9ef81 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/3302f0f507bd:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) 
Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/3302f0f507bd:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45125 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/3302f0f507bd:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45125 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45125 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:45125 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45125 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=120 (was 75) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6928 (was 7548) 2024-11-08T00:35:08,343 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=82, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=120, ProcessCount=11, AvailableMemoryMB=6927 2024-11-08T00:35:08,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T00:35:08,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.log.dir so I do NOT create it in target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97 2024-11-08T00:35:08,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7a839e03-349b-445b-c5ab-d7393c746cf1/hadoop.tmp.dir so I do NOT create it in target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97 2024-11-08T00:35:08,344 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7, deleteOnExit=true 2024-11-08T00:35:08,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T00:35:08,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/test.cache.data in system properties and HBase conf 2024-11-08T00:35:08,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T00:35:08,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.log.dir in system properties and HBase conf 2024-11-08T00:35:08,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T00:35:08,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T00:35:08,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T00:35:08,345 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T00:35:08,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:35:08,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:35:08,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T00:35:08,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:35:08,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T00:35:08,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T00:35:08,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:35:08,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:35:08,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T00:35:08,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/nfs.dump.dir in system properties and HBase conf 2024-11-08T00:35:08,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/java.io.tmpdir in system properties and HBase conf 2024-11-08T00:35:08,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:35:08,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T00:35:08,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T00:35:08,362 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:35:08,788 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:08,796 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:08,806 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:08,806 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:08,806 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:35:08,807 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:08,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:08,808 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:08,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d95bc23{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/java.io.tmpdir/jetty-localhost-42753-hadoop-hdfs-3_4_1-tests_jar-_-any-1308450527918332029/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:35:08,931 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:42753} 2024-11-08T00:35:08,931 INFO [Time-limited test {}] server.Server(415): Started @103028ms 2024-11-08T00:35:08,948 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:35:09,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:09,204 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:09,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:09,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:09,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:35:09,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:09,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:09,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d69c419{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/java.io.tmpdir/jetty-localhost-45465-hadoop-hdfs-3_4_1-tests_jar-_-any-15574175817238169215/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:09,312 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:45465} 2024-11-08T00:35:09,312 INFO [Time-limited test {}] server.Server(415): Started @103410ms 2024-11-08T00:35:09,314 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:35:09,367 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:09,372 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:09,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:09,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:09,373 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:35:09,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:09,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:09,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75434f63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/java.io.tmpdir/jetty-localhost-40643-hadoop-hdfs-3_4_1-tests_jar-_-any-14993680127160540769/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:09,480 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:40643} 2024-11-08T00:35:09,480 INFO [Time-limited test {}] server.Server(415): Started @103577ms 2024-11-08T00:35:09,481 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:35:10,443 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/data/data1/current/BP-105332543-172.17.0.3-1731026108375/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:10,443 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/data/data2/current/BP-105332543-172.17.0.3-1731026108375/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:10,464 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:35:10,467 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7fb6b3d29a96034e with lease ID 0xb76e360105a1c7b2: Processing first storage report for DS-531cba5e-a30f-4a61-a04b-e55ef0037d67 from datanode DatanodeRegistration(127.0.0.1:37901, datanodeUuid=9d25cddd-241b-4851-9300-3264378aa578, infoPort=35541, infoSecurePort=0, ipcPort=32921, storageInfo=lv=-57;cid=testClusterID;nsid=1870705477;c=1731026108375) 2024-11-08T00:35:10,467 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7fb6b3d29a96034e with lease ID 0xb76e360105a1c7b2: from storage DS-531cba5e-a30f-4a61-a04b-e55ef0037d67 node DatanodeRegistration(127.0.0.1:37901, datanodeUuid=9d25cddd-241b-4851-9300-3264378aa578, infoPort=35541, infoSecurePort=0, ipcPort=32921, storageInfo=lv=-57;cid=testClusterID;nsid=1870705477;c=1731026108375), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:10,467 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7fb6b3d29a96034e with lease ID 0xb76e360105a1c7b2: Processing first storage report for DS-5b37a15e-44ca-49ba-8835-f331aca08997 from datanode DatanodeRegistration(127.0.0.1:37901, datanodeUuid=9d25cddd-241b-4851-9300-3264378aa578, infoPort=35541, infoSecurePort=0, ipcPort=32921, storageInfo=lv=-57;cid=testClusterID;nsid=1870705477;c=1731026108375) 2024-11-08T00:35:10,467 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7fb6b3d29a96034e with lease ID 0xb76e360105a1c7b2: from storage DS-5b37a15e-44ca-49ba-8835-f331aca08997 node DatanodeRegistration(127.0.0.1:37901, datanodeUuid=9d25cddd-241b-4851-9300-3264378aa578, infoPort=35541, infoSecurePort=0, ipcPort=32921, storageInfo=lv=-57;cid=testClusterID;nsid=1870705477;c=1731026108375), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:10,595 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/data/data3/current/BP-105332543-172.17.0.3-1731026108375/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:10,595 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/data/data4/current/BP-105332543-172.17.0.3-1731026108375/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:10,621 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:35:10,626 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa29d94ef50c0ca2f with lease ID 0xb76e360105a1c7b3: Processing first storage report for DS-65acb5c2-e377-4a16-974a-b35b8034f595 from datanode DatanodeRegistration(127.0.0.1:42367, datanodeUuid=3c038d89-55c9-451f-8333-2b1ecba7be71, infoPort=42011, infoSecurePort=0, ipcPort=37331, storageInfo=lv=-57;cid=testClusterID;nsid=1870705477;c=1731026108375) 2024-11-08T00:35:10,626 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa29d94ef50c0ca2f with lease ID 0xb76e360105a1c7b3: from storage DS-65acb5c2-e377-4a16-974a-b35b8034f595 node DatanodeRegistration(127.0.0.1:42367, datanodeUuid=3c038d89-55c9-451f-8333-2b1ecba7be71, infoPort=42011, infoSecurePort=0, ipcPort=37331, storageInfo=lv=-57;cid=testClusterID;nsid=1870705477;c=1731026108375), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:10,626 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa29d94ef50c0ca2f with lease ID 0xb76e360105a1c7b3: Processing first storage report for DS-f51a1298-71b5-4ced-99fa-c0bc1a653a7a from datanode DatanodeRegistration(127.0.0.1:42367, datanodeUuid=3c038d89-55c9-451f-8333-2b1ecba7be71, infoPort=42011, infoSecurePort=0, ipcPort=37331, storageInfo=lv=-57;cid=testClusterID;nsid=1870705477;c=1731026108375) 2024-11-08T00:35:10,626 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa29d94ef50c0ca2f with lease ID 0xb76e360105a1c7b3: from storage DS-f51a1298-71b5-4ced-99fa-c0bc1a653a7a node DatanodeRegistration(127.0.0.1:42367, datanodeUuid=3c038d89-55c9-451f-8333-2b1ecba7be71, infoPort=42011, infoSecurePort=0, ipcPort=37331, storageInfo=lv=-57;cid=testClusterID;nsid=1870705477;c=1731026108375), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T00:35:10,724 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97 2024-11-08T00:35:10,727 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/zookeeper_0, clientPort=52740, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T00:35:10,729 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52740 2024-11-08T00:35:10,729 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:10,731 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:10,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:35:10,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:35:10,744 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e with version=8 2024-11-08T00:35:10,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase-staging 2024-11-08T00:35:10,747 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:35:10,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:10,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:10,747 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:35:10,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:10,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:35:10,747 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T00:35:10,748 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:35:10,748 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33401 2024-11-08T00:35:10,751 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33401 connecting to ZooKeeper ensemble=127.0.0.1:52740 2024-11-08T00:35:10,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:334010x0, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:35:10,823 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33401-0x10117de44b20000 connected 2024-11-08T00:35:10,911 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:10,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:10,916 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:10,916 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e, hbase.cluster.distributed=false 2024-11-08T00:35:10,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:35:10,918 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33401 2024-11-08T00:35:10,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33401 2024-11-08T00:35:10,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33401 2024-11-08T00:35:10,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33401 2024-11-08T00:35:10,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33401 2024-11-08T00:35:10,937 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:35:10,937 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:10,937 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:10,938 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:35:10,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:10,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:35:10,938 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:35:10,938 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:35:10,939 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39261 2024-11-08T00:35:10,940 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39261 connecting to ZooKeeper ensemble=127.0.0.1:52740 2024-11-08T00:35:10,941 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:10,944 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:10,953 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392610x0, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:35:10,954 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:10,954 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39261-0x10117de44b20001 connected 2024-11-08T00:35:10,954 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:35:10,955 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:35:10,955 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:35:10,957 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:35:10,957 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39261 2024-11-08T00:35:10,957 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39261 2024-11-08T00:35:10,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39261 2024-11-08T00:35:10,961 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39261 2024-11-08T00:35:10,962 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39261 2024-11-08T00:35:10,976 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3302f0f507bd:33401 2024-11-08T00:35:10,976 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3302f0f507bd,33401,1731026110746 2024-11-08T00:35:10,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:35:10,985 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:35:10,985 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3302f0f507bd,33401,1731026110746 2024-11-08T00:35:10,995 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:35:10,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:10,995 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,001 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:35:11,004 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3302f0f507bd,33401,1731026110746 from backup master directory 2024-11-08T00:35:11,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3302f0f507bd,33401,1731026110746 2024-11-08T00:35:11,016 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:35:11,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:35:11,016 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
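For orientation, the startup recorded above (mini ZooKeeper on a random client port, HDFS at localhost:33309, one HMaster plus one RegionServer, hbase.cluster.distributed=false) is the kind of environment HBaseTestingUtil builds for a test. The sketch below shows how such a test typically drives it; only HBaseTestingUtil and its public methods are the real API, while the wrapper class and its body are assumptions of mine, not code from this run.

```java
// Minimal sketch, assuming a hypothetical driver class (MiniClusterSketch);
// it mirrors the sequence visible in the log: start mini ZK + mini DFS +
// master + regionserver, then tear everything down.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    util.startMiniCluster();                     // ZK, HDFS, HMaster, RegionServer
    System.out.println("rootdir = " + conf.get("hbase.rootdir"));
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster();                // stop daemons, clean test-data dirs
    }
  }
}
```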
2024-11-08T00:35:11,017 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3302f0f507bd,33401,1731026110746 2024-11-08T00:35:11,022 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/hbase.id] with ID: 10b9ef35-0419-4866-90ad-490818d61868 2024-11-08T00:35:11,023 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/.tmp/hbase.id 2024-11-08T00:35:11,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:35:11,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:35:11,031 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/.tmp/hbase.id]:[hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/hbase.id] 2024-11-08T00:35:11,048 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:11,048 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T00:35:11,050 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
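The cluster ID handling above follows a write-to-.tmp-then-move pattern: the ID is written under .tmp and then relocated to hbase.id under the HBase root directory. The sketch below shows that pattern with the stock Hadoop FileSystem API only; the method name, paths and ID value are illustrative assumptions, not the internal FSUtils implementation.

```java
// Sketch of the write-then-rename pattern, assuming hypothetical paths and ID.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdSketch {
  static void writeClusterId(FileSystem fs, Path rootDir, String id) throws IOException {
    Path tmp = new Path(rootDir, ".tmp/hbase.id");   // temporary location first
    Path target = new Path(rootDir, "hbase.id");     // final location
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(id.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {                   // the "move" step seen in the log
      throw new IOException("could not move " + tmp + " to " + target);
    }
  }
}
```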
2024-11-08T00:35:11,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,058 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:35:11,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:35:11,071 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:35:11,072 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T00:35:11,073 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:35:11,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:35:11,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:35:11,083 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store 2024-11-08T00:35:11,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:35:11,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:35:11,094 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:11,094 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:35:11,094 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:11,094 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:11,094 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:35:11,094 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:11,094 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
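The master:store descriptor printed above is assembled from per-column-family settings (versions, data block encoding, bloom filter, block size, in-memory flag). The sketch below shows how such a descriptor is built with the public client API, using a hypothetical "demo" table and only the 'info' family's settings rather than the internal master:store table itself.

```java
// Sketch using the public descriptor builders; the table name is an assumption.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor demoDescriptor() {
    // Mirrors the 'info' family settings printed in the log: 3 versions,
    // ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))   // hypothetical table, not master:store
        .setColumnFamily(info)
        .build();
  }
}
```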
2024-11-08T00:35:11,094 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026111094Disabling compacts and flushes for region at 1731026111094Disabling writes for close at 1731026111094Writing region close event to WAL at 1731026111094Closed at 1731026111094 2024-11-08T00:35:11,095 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/.initializing 2024-11-08T00:35:11,096 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/WALs/3302f0f507bd,33401,1731026110746 2024-11-08T00:35:11,099 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C33401%2C1731026110746, suffix=, logDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/WALs/3302f0f507bd,33401,1731026110746, archiveDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/oldWALs, maxLogs=10 2024-11-08T00:35:11,099 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C33401%2C1731026110746.1731026111099 2024-11-08T00:35:11,105 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/WALs/3302f0f507bd,33401,1731026110746/3302f0f507bd%2C33401%2C1731026110746.1731026111099 2024-11-08T00:35:11,106 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42011:42011),(127.0.0.1/127.0.0.1:35541:35541)] 2024-11-08T00:35:11,106 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:35:11,106 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:11,107 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,107 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,108 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,110 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T00:35:11,110 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:11,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,113 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T00:35:11,113 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:35:11,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T00:35:11,121 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:35:11,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T00:35:11,125 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:35:11,127 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,128 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,128 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,130 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,130 DEBUG [master/3302f0f507bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,131 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:35:11,133 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:11,137 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:35:11,137 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786760, jitterRate=4.183053970336914E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:35:11,139 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731026111107Initializing all the Stores at 1731026111108 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026111108Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026111108Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026111108Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026111108Cleaning up temporary data from old regions at 1731026111130 (+22 ms)Region opened successfully at 1731026111139 (+9 ms) 2024-11-08T00:35:11,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T00:35:11,144 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@199420a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:35:11,145 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T00:35:11,145 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T00:35:11,146 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T00:35:11,146 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T00:35:11,147 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-08T00:35:11,147 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T00:35:11,147 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T00:35:11,150 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T00:35:11,151 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T00:35:11,163 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T00:35:11,164 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T00:35:11,165 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T00:35:11,174 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T00:35:11,175 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T00:35:11,176 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T00:35:11,184 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T00:35:11,186 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T00:35:11,195 DEBUG 
[master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T00:35:11,198 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T00:35:11,205 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T00:35:11,293 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:11,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:11,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,293 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,294 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3302f0f507bd,33401,1731026110746, sessionid=0x10117de44b20000, setting cluster-up flag (Was=false) 2024-11-08T00:35:11,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,437 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,469 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T00:35:11,471 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,33401,1731026110746 2024-11-08T00:35:11,490 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:11,521 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T00:35:11,523 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,33401,1731026110746 2024-11-08T00:35:11,524 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T00:35:11,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T00:35:11,527 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T00:35:11,527 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-08T00:35:11,527 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3302f0f507bd,33401,1731026110746 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T00:35:11,529 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:35:11,529 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:35:11,529 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:35:11,529 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:35:11,529 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3302f0f507bd:0, corePoolSize=10, maxPoolSize=10 2024-11-08T00:35:11,529 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,530 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:35:11,530 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-08T00:35:11,532 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:35:11,532 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T00:35:11,534 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,534 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T00:35:11,534 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731026141534 2024-11-08T00:35:11,534 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T00:35:11,535 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T00:35:11,535 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T00:35:11,535 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T00:35:11,535 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T00:35:11,535 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T00:35:11,535 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,536 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T00:35:11,536 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T00:35:11,536 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T00:35:11,537 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T00:35:11,537 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T00:35:11,537 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026111537,5,FailOnTimeoutGroup] 2024-11-08T00:35:11,537 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026111537,5,FailOnTimeoutGroup] 2024-11-08T00:35:11,537 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,538 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T00:35:11,538 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,538 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:11,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:35:11,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:35:11,542 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T00:35:11,543 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e 2024-11-08T00:35:11,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:35:11,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:35:11,550 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:11,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:35:11,554 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:35:11,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:11,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:35:11,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:35:11,557 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:11,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:35:11,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:35:11,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:11,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:35:11,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:35:11,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:11,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:11,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:35:11,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740 2024-11-08T00:35:11,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740 2024-11-08T00:35:11,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:35:11,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:35:11,567 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-08T00:35:11,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:35:11,569 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(746): ClusterId : 10b9ef35-0419-4866-90ad-490818d61868 2024-11-08T00:35:11,569 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:35:11,571 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:35:11,572 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777396, jitterRate=-0.011490106582641602}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:35:11,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731026111551Initializing all the Stores at 1731026111551Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026111551Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026111552 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026111552Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026111552Cleaning up temporary data from old regions at 1731026111566 (+14 ms)Region opened successfully at 1731026111573 (+7 ms) 2024-11-08T00:35:11,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:35:11,573 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:35:11,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:35:11,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:35:11,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:35:11,574 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-08T00:35:11,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026111573Disabling compacts and flushes for region at 1731026111573Disabling writes for close at 1731026111573Writing region close event to WAL at 1731026111574 (+1 ms)Closed at 1731026111574 2024-11-08T00:35:11,576 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:35:11,576 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T00:35:11,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T00:35:11,578 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:35:11,580 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T00:35:11,583 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:35:11,583 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:35:11,596 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:35:11,596 DEBUG [RS:0;3302f0f507bd:39261 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bc3feb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:35:11,615 DEBUG [RS:0;3302f0f507bd:39261 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3302f0f507bd:39261 2024-11-08T00:35:11,615 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:35:11,615 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:35:11,615 DEBUG [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-08T00:35:11,616 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,33401,1731026110746 with port=39261, startcode=1731026110937 2024-11-08T00:35:11,617 DEBUG [RS:0;3302f0f507bd:39261 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:35:11,620 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37537, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:35:11,621 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33401 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3302f0f507bd,39261,1731026110937 2024-11-08T00:35:11,621 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33401 {}] master.ServerManager(517): Registering regionserver=3302f0f507bd,39261,1731026110937 2024-11-08T00:35:11,624 DEBUG [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e 2024-11-08T00:35:11,624 DEBUG [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33309 2024-11-08T00:35:11,624 DEBUG [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:35:11,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:35:11,638 DEBUG [RS:0;3302f0f507bd:39261 {}] zookeeper.ZKUtil(111): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3302f0f507bd,39261,1731026110937 2024-11-08T00:35:11,638 WARN [RS:0;3302f0f507bd:39261 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:35:11,638 INFO [RS:0;3302f0f507bd:39261 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:35:11,638 DEBUG [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/WALs/3302f0f507bd,39261,1731026110937 2024-11-08T00:35:11,638 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3302f0f507bd,39261,1731026110937] 2024-11-08T00:35:11,643 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:35:11,647 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:35:11,647 INFO [RS:0;3302f0f507bd:39261 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:35:11,647 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:11,648 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:35:11,649 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:35:11,649 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,649 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,650 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,650 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,650 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,650 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,650 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:35:11,650 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,650 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,650 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,651 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,651 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,651 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:11,651 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:35:11,651 DEBUG [RS:0;3302f0f507bd:39261 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:35:11,655 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:11,656 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,656 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,656 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,656 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,656 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39261,1731026110937-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:35:11,675 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:35:11,675 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39261,1731026110937-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,676 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,676 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.Replication(171): 3302f0f507bd,39261,1731026110937 started 2024-11-08T00:35:11,697 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:11,698 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1482): Serving as 3302f0f507bd,39261,1731026110937, RpcServer on 3302f0f507bd/172.17.0.3:39261, sessionid=0x10117de44b20001 2024-11-08T00:35:11,698 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:35:11,698 DEBUG [RS:0;3302f0f507bd:39261 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3302f0f507bd,39261,1731026110937 2024-11-08T00:35:11,698 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,39261,1731026110937' 2024-11-08T00:35:11,698 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:35:11,699 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:35:11,699 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:35:11,700 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:35:11,700 DEBUG [RS:0;3302f0f507bd:39261 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3302f0f507bd,39261,1731026110937 2024-11-08T00:35:11,700 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,39261,1731026110937' 2024-11-08T00:35:11,700 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:35:11,700 DEBUG 
[RS:0;3302f0f507bd:39261 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:35:11,701 DEBUG [RS:0;3302f0f507bd:39261 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:35:11,701 INFO [RS:0;3302f0f507bd:39261 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T00:35:11,701 INFO [RS:0;3302f0f507bd:39261 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T00:35:11,730 WARN [3302f0f507bd:33401 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T00:35:11,805 INFO [RS:0;3302f0f507bd:39261 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C39261%2C1731026110937, suffix=, logDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/WALs/3302f0f507bd,39261,1731026110937, archiveDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/oldWALs, maxLogs=32 2024-11-08T00:35:11,808 INFO [RS:0;3302f0f507bd:39261 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C39261%2C1731026110937.1731026111808 2024-11-08T00:35:11,817 INFO [RS:0;3302f0f507bd:39261 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/WALs/3302f0f507bd,39261,1731026110937/3302f0f507bd%2C39261%2C1731026110937.1731026111808 2024-11-08T00:35:11,818 DEBUG [RS:0;3302f0f507bd:39261 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42011:42011),(127.0.0.1/127.0.0.1:35541:35541)] 2024-11-08T00:35:11,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:35:11,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-08T00:35:11,969 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-08T00:35:11,981 DEBUG [3302f0f507bd:33401 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-08T00:35:11,981 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3302f0f507bd,39261,1731026110937 2024-11-08T00:35:11,983 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,39261,1731026110937, state=OPENING 2024-11-08T00:35:12,033 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T00:35:12,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:12,089 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-08T00:35:12,090 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:35:12,090 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:35:12,090 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:35:12,090 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,39261,1731026110937}] 2024-11-08T00:35:12,245 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:35:12,247 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38569, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:35:12,253 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T00:35:12,253 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:35:12,256 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C39261%2C1731026110937.meta, suffix=.meta, logDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/WALs/3302f0f507bd,39261,1731026110937, archiveDir=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/oldWALs, maxLogs=32 2024-11-08T00:35:12,259 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C39261%2C1731026110937.meta.1731026112258.meta 2024-11-08T00:35:12,266 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/WALs/3302f0f507bd,39261,1731026110937/3302f0f507bd%2C39261%2C1731026110937.meta.1731026112258.meta 2024-11-08T00:35:12,267 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35541:35541),(127.0.0.1/127.0.0.1:42011:42011)] 2024-11-08T00:35:12,268 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:35:12,268 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T00:35:12,268 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T00:35:12,268 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-08T00:35:12,268 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T00:35:12,269 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:12,269 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T00:35:12,269 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T00:35:12,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:35:12,272 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:35:12,272 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:12,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:12,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:35:12,274 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:35:12,274 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:12,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:12,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:35:12,276 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:35:12,276 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:12,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:12,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:35:12,278 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:35:12,278 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:12,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:12,279 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:35:12,281 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740 2024-11-08T00:35:12,282 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740 2024-11-08T00:35:12,284 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:35:12,284 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:35:12,285 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-08T00:35:12,286 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:35:12,288 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829721, jitterRate=0.05504588782787323}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:35:12,288 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T00:35:12,289 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731026112269Writing region info on filesystem at 1731026112269Initializing all the Stores at 1731026112270 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026112270Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1731026112270Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026112270Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026112271 (+1 ms)Cleaning up temporary data from old regions at 1731026112284 (+13 ms)Running coprocessor post-open hooks at 1731026112288 (+4 ms)Region opened successfully at 1731026112289 (+1 ms) 2024-11-08T00:35:12,291 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731026112245 2024-11-08T00:35:12,294 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T00:35:12,294 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T00:35:12,295 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,39261,1731026110937 2024-11-08T00:35:12,296 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,39261,1731026110937, state=OPEN 2024-11-08T00:35:12,423 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:35:12,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:35:12,423 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3302f0f507bd,39261,1731026110937 2024-11-08T00:35:12,423 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:35:12,423 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:35:12,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T00:35:12,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,39261,1731026110937 in 333 msec 2024-11-08T00:35:12,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=2, resume processing ppid=1 2024-11-08T00:35:12,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 854 msec 2024-11-08T00:35:12,437 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:35:12,437 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T00:35:12,439 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:35:12,439 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,39261,1731026110937, seqNum=-1] 2024-11-08T00:35:12,439 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:35:12,441 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41901, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:35:12,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 922 msec 2024-11-08T00:35:12,449 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731026112449, completionTime=-1 2024-11-08T00:35:12,450 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-08T00:35:12,450 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T00:35:12,452 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-08T00:35:12,452 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731026172452 2024-11-08T00:35:12,452 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731026232452 2024-11-08T00:35:12,452 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-08T00:35:12,453 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33401,1731026110746-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:12,453 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33401,1731026110746-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:12,453 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33401,1731026110746-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:12,453 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3302f0f507bd:33401, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:12,453 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:12,453 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:12,455 DEBUG [master/3302f0f507bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T00:35:12,458 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.440sec 2024-11-08T00:35:12,458 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T00:35:12,458 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T00:35:12,458 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T00:35:12,458 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-08T00:35:12,458 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T00:35:12,458 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33401,1731026110746-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:35:12,458 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33401,1731026110746-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T00:35:12,461 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T00:35:12,461 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T00:35:12,461 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33401,1731026110746-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:12,470 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aadf114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:35:12,470 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3302f0f507bd,33401,-1 for getting cluster id 2024-11-08T00:35:12,470 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T00:35:12,472 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '10b9ef35-0419-4866-90ad-490818d61868' 2024-11-08T00:35:12,473 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T00:35:12,473 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "10b9ef35-0419-4866-90ad-490818d61868" 2024-11-08T00:35:12,473 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16418d1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:35:12,473 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3302f0f507bd,33401,-1] 2024-11-08T00:35:12,474 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T00:35:12,474 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:12,476 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36868, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T00:35:12,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@510cc70d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:35:12,478 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:35:12,479 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,39261,1731026110937, seqNum=-1] 2024-11-08T00:35:12,479 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:35:12,481 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48354, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:35:12,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3302f0f507bd,33401,1731026110746 2024-11-08T00:35:12,484 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:12,487 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-08T00:35:12,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T00:35:12,488 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T00:35:12,488 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:12,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:12,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:12,488 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T00:35:12,488 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T00:35:12,488 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1369313375, stopped=false 2024-11-08T00:35:12,488 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3302f0f507bd,33401,1731026110746 2024-11-08T00:35:12,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:12,511 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:12,511 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:12,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:12,511 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:35:12,511 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-08T00:35:12,512 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:12,512 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:12,512 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:12,512 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:12,513 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3302f0f507bd,39261,1731026110937' ***** 2024-11-08T00:35:12,513 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:35:12,513 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:35:12,513 INFO [RS:0;3302f0f507bd:39261 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:35:12,513 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:35:12,513 INFO [RS:0;3302f0f507bd:39261 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:35:12,514 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(959): stopping server 3302f0f507bd,39261,1731026110937 2024-11-08T00:35:12,514 INFO [RS:0;3302f0f507bd:39261 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:35:12,514 INFO [RS:0;3302f0f507bd:39261 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3302f0f507bd:39261. 2024-11-08T00:35:12,514 DEBUG [RS:0;3302f0f507bd:39261 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:12,514 DEBUG [RS:0;3302f0f507bd:39261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:12,514 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-08T00:35:12,514 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:35:12,514 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T00:35:12,515 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T00:35:12,515 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-08T00:35:12,515 DEBUG [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-08T00:35:12,515 DEBUG [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-08T00:35:12,515 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:35:12,515 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:35:12,516 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:35:12,516 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:35:12,516 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:35:12,516 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-08T00:35:12,538 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740/.tmp/ns/755e8747c1414a84b505101d2ccdee1d is 43, key is default/ns:d/1731026112442/Put/seqid=0 2024-11-08T00:35:12,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741835_1011 (size=5153) 2024-11-08T00:35:12,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741835_1011 (size=5153) 2024-11-08T00:35:12,545 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740/.tmp/ns/755e8747c1414a84b505101d2ccdee1d 2024-11-08T00:35:12,554 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740/.tmp/ns/755e8747c1414a84b505101d2ccdee1d as hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740/ns/755e8747c1414a84b505101d2ccdee1d 2024-11-08T00:35:12,562 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740/ns/755e8747c1414a84b505101d2ccdee1d, entries=2, sequenceid=6, filesize=5.0 K 2024-11-08T00:35:12,563 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 47ms, sequenceid=6, compaction requested=false 2024-11-08T00:35:12,563 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-08T00:35:12,569 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-08T00:35:12,570 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:35:12,570 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:35:12,570 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026112515Running coprocessor pre-close hooks at 1731026112515Disabling compacts and flushes for region at 1731026112515Disabling writes for close at 1731026112516 (+1 ms)Obtaining lock to block concurrent updates at 1731026112516Preparing flush snapshotting stores in 1588230740 at 1731026112516Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731026112517 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731026112518 (+1 ms)Flushing 1588230740/ns: creating writer at 1731026112518Flushing 1588230740/ns: appending metadata at 1731026112538 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731026112538Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@799c5b4: reopening flushed file at 1731026112553 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 47ms, sequenceid=6, compaction requested=false at 1731026112563 (+10 ms)Writing region close event to WAL at 1731026112565 (+2 ms)Running coprocessor post-close hooks at 1731026112570 (+5 ms)Closed at 1731026112570 2024-11-08T00:35:12,570 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T00:35:12,657 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-08T00:35:12,657 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-08T00:35:12,715 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(976): stopping server 3302f0f507bd,39261,1731026110937; all regions closed. 
2024-11-08T00:35:12,716 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,716 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,716 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,717 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,717 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741834_1010 (size=1152) 2024-11-08T00:35:12,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741834_1010 (size=1152) 2024-11-08T00:35:12,722 DEBUG [RS:0;3302f0f507bd:39261 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/oldWALs 2024-11-08T00:35:12,723 INFO [RS:0;3302f0f507bd:39261 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C39261%2C1731026110937.meta:.meta(num 1731026112258) 2024-11-08T00:35:12,723 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,723 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,723 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,723 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,723 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741833_1009 (size=93) 2024-11-08T00:35:12,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741833_1009 (size=93) 2024-11-08T00:35:12,729 DEBUG [RS:0;3302f0f507bd:39261 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/oldWALs 2024-11-08T00:35:12,729 INFO [RS:0;3302f0f507bd:39261 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C39261%2C1731026110937:(num 1731026111808) 2024-11-08T00:35:12,729 DEBUG [RS:0;3302f0f507bd:39261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:12,729 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:35:12,729 INFO [RS:0;3302f0f507bd:39261 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:35:12,729 INFO [RS:0;3302f0f507bd:39261 {}] hbase.ChoreService(370): Chore service for: regionserver/3302f0f507bd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T00:35:12,730 INFO [RS:0;3302f0f507bd:39261 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:35:12,730 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T00:35:12,730 INFO [RS:0;3302f0f507bd:39261 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39261 2024-11-08T00:35:12,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:35:12,742 INFO [RS:0;3302f0f507bd:39261 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:35:12,742 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3302f0f507bd,39261,1731026110937 2024-11-08T00:35:12,753 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3302f0f507bd,39261,1731026110937] 2024-11-08T00:35:12,754 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:12,763 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3302f0f507bd,39261,1731026110937 already deleted, retry=false 2024-11-08T00:35:12,764 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3302f0f507bd,39261,1731026110937 expired; onlineServers=0 2024-11-08T00:35:12,764 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3302f0f507bd,33401,1731026110746' ***** 2024-11-08T00:35:12,764 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T00:35:12,764 INFO [M:0;3302f0f507bd:33401 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:35:12,764 INFO [M:0;3302f0f507bd:33401 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:35:12,764 DEBUG [M:0;3302f0f507bd:33401 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T00:35:12,764 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-08T00:35:12,766 DEBUG [M:0;3302f0f507bd:33401 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T00:35:12,766 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026111537 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026111537,5,FailOnTimeoutGroup] 2024-11-08T00:35:12,766 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026111537 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026111537,5,FailOnTimeoutGroup] 2024-11-08T00:35:12,766 INFO [M:0;3302f0f507bd:33401 {}] hbase.ChoreService(370): Chore service for: master/3302f0f507bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T00:35:12,766 INFO [M:0;3302f0f507bd:33401 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:35:12,766 DEBUG [M:0;3302f0f507bd:33401 {}] master.HMaster(1795): Stopping service threads 2024-11-08T00:35:12,766 INFO [M:0;3302f0f507bd:33401 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T00:35:12,767 INFO [M:0;3302f0f507bd:33401 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:35:12,767 INFO [M:0;3302f0f507bd:33401 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T00:35:12,767 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T00:35:12,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:12,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T00:35:12,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:12,784 DEBUG [M:0;3302f0f507bd:33401 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-08T00:35:12,784 DEBUG [M:0;3302f0f507bd:33401 {}] master.ActiveMasterManager(353): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-08T00:35:12,785 INFO [M:0;3302f0f507bd:33401 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/.lastflushedseqids 2024-11-08T00:35:12,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741836_1012 (size=108) 2024-11-08T00:35:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741836_1012 (size=108) 2024-11-08T00:35:12,792 INFO [M:0;3302f0f507bd:33401 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T00:35:12,792 INFO [M:0;3302f0f507bd:33401 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', 
STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T00:35:12,792 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:35:12,792 INFO [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:12,792 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:12,792 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:35:12,792 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:12,793 INFO [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-08T00:35:12,815 DEBUG [M:0;3302f0f507bd:33401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/19a7140fb1044dd68a98616580204732 is 82, key is hbase:meta,,1/info:regioninfo/1731026112295/Put/seqid=0 2024-11-08T00:35:12,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741837_1013 (size=5672) 2024-11-08T00:35:12,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741837_1013 (size=5672) 2024-11-08T00:35:12,822 INFO [M:0;3302f0f507bd:33401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/19a7140fb1044dd68a98616580204732 2024-11-08T00:35:12,846 DEBUG [M:0;3302f0f507bd:33401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/afcec5963bed4660b5008439314f851e is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731026112448/Put/seqid=0 2024-11-08T00:35:12,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741838_1014 (size=5275) 2024-11-08T00:35:12,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741838_1014 (size=5275) 2024-11-08T00:35:12,852 INFO [M:0;3302f0f507bd:33401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/afcec5963bed4660b5008439314f851e 2024-11-08T00:35:12,853 INFO [RS:0;3302f0f507bd:39261 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:35:12,853 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, 
path=null 2024-11-08T00:35:12,853 INFO [RS:0;3302f0f507bd:39261 {}] regionserver.HRegionServer(1031): Exiting; stopping=3302f0f507bd,39261,1731026110937; zookeeper connection closed. 2024-11-08T00:35:12,853 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39261-0x10117de44b20001, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:35:12,853 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1453a7c4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1453a7c4 2024-11-08T00:35:12,854 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-08T00:35:12,877 DEBUG [M:0;3302f0f507bd:33401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b950e9144544191aeb31201819a4e83 is 69, key is 3302f0f507bd,39261,1731026110937/rs:state/1731026111622/Put/seqid=0 2024-11-08T00:35:12,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741839_1015 (size=5156) 2024-11-08T00:35:12,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741839_1015 (size=5156) 2024-11-08T00:35:12,884 INFO [M:0;3302f0f507bd:33401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b950e9144544191aeb31201819a4e83 2024-11-08T00:35:12,913 DEBUG [M:0;3302f0f507bd:33401 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ae5b2dd39a142da88935a4f92c6b69d is 52, key is load_balancer_on/state:d/1731026112486/Put/seqid=0 2024-11-08T00:35:12,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741840_1016 (size=5056) 2024-11-08T00:35:12,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741840_1016 (size=5056) 2024-11-08T00:35:12,919 INFO [M:0;3302f0f507bd:33401 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ae5b2dd39a142da88935a4f92c6b69d 2024-11-08T00:35:12,926 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/19a7140fb1044dd68a98616580204732 as hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/19a7140fb1044dd68a98616580204732 2024-11-08T00:35:12,933 INFO [M:0;3302f0f507bd:33401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/19a7140fb1044dd68a98616580204732, entries=8, sequenceid=29, filesize=5.5 K 2024-11-08T00:35:12,934 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/afcec5963bed4660b5008439314f851e as hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/afcec5963bed4660b5008439314f851e 2024-11-08T00:35:12,941 INFO [M:0;3302f0f507bd:33401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/afcec5963bed4660b5008439314f851e, entries=3, sequenceid=29, filesize=5.2 K 2024-11-08T00:35:12,942 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9b950e9144544191aeb31201819a4e83 as hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b950e9144544191aeb31201819a4e83 2024-11-08T00:35:12,950 INFO [M:0;3302f0f507bd:33401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9b950e9144544191aeb31201819a4e83, entries=1, sequenceid=29, filesize=5.0 K 2024-11-08T00:35:12,951 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0ae5b2dd39a142da88935a4f92c6b69d as hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0ae5b2dd39a142da88935a4f92c6b69d 2024-11-08T00:35:12,959 INFO [M:0;3302f0f507bd:33401 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33309/user/jenkins/test-data/af5918b1-628a-da39-aa17-a025de318f8e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0ae5b2dd39a142da88935a4f92c6b69d, entries=1, sequenceid=29, filesize=4.9 K 2024-11-08T00:35:12,961 INFO [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 169ms, sequenceid=29, compaction requested=false 2024-11-08T00:35:12,964 INFO [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T00:35:12,964 DEBUG [M:0;3302f0f507bd:33401 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026112792Disabling compacts and flushes for region at 1731026112792Disabling writes for close at 1731026112792Obtaining lock to block concurrent updates at 1731026112793 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731026112793Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731026112793Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731026112794 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731026112794Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731026112815 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731026112815Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731026112829 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731026112845 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731026112845Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731026112859 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731026112876 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731026112876Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731026112891 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731026112913 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731026112913Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bf90ed2: reopening flushed file at 1731026112925 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2633f4c8: reopening flushed file at 1731026112933 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7228c782: reopening flushed file at 1731026112941 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16780b40: reopening flushed file at 1731026112950 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 169ms, sequenceid=29, compaction requested=false at 1731026112961 (+11 ms)Writing region close event to WAL at 1731026112964 (+3 ms)Closed at 1731026112964 2024-11-08T00:35:12,965 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:12,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37901 is added to blk_1073741830_1006 (size=10311) 2024-11-08T00:35:12,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42367 is added to blk_1073741830_1006 (size=10311) 2024-11-08T00:35:12,969 INFO [M:0;3302f0f507bd:33401 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-08T00:35:12,969 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T00:35:12,969 INFO [M:0;3302f0f507bd:33401 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33401 2024-11-08T00:35:12,970 INFO [M:0;3302f0f507bd:33401 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:35:13,082 INFO [M:0;3302f0f507bd:33401 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:35:13,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:35:13,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33401-0x10117de44b20000, quorum=127.0.0.1:52740, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:35:13,084 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75434f63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:13,085 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:13,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:13,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:13,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:13,086 WARN [BP-105332543-172.17.0.3-1731026108375 heartbeating to localhost/127.0.0.1:33309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:35:13,086 WARN [BP-105332543-172.17.0.3-1731026108375 heartbeating to localhost/127.0.0.1:33309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-105332543-172.17.0.3-1731026108375 (Datanode Uuid 3c038d89-55c9-451f-8333-2b1ecba7be71) service to localhost/127.0.0.1:33309 2024-11-08T00:35:13,086 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:35:13,087 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:35:13,087 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/data/data3/current/BP-105332543-172.17.0.3-1731026108375 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:13,087 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/data/data4/current/BP-105332543-172.17.0.3-1731026108375 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:13,088 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:35:13,090 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d69c419{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:13,090 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:13,090 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:13,090 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:13,090 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:13,092 WARN [BP-105332543-172.17.0.3-1731026108375 heartbeating to localhost/127.0.0.1:33309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:35:13,092 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:35:13,092 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:35:13,092 WARN [BP-105332543-172.17.0.3-1731026108375 heartbeating to localhost/127.0.0.1:33309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-105332543-172.17.0.3-1731026108375 (Datanode Uuid 9d25cddd-241b-4851-9300-3264378aa578) service to localhost/127.0.0.1:33309 2024-11-08T00:35:13,092 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/data/data1/current/BP-105332543-172.17.0.3-1731026108375 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:13,093 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/cluster_27fd3343-831d-be17-3af5-e5b42c9286b7/data/data2/current/BP-105332543-172.17.0.3-1731026108375 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:13,093 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:35:13,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d95bc23{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:35:13,099 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:13,099 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:13,099 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:13,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:13,107 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T00:35:13,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T00:35:13,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T00:35:13,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.log.dir so I do NOT create it in target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce 2024-11-08T00:35:13,133 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3b3f81e4-f799-70da-ef23-dbcf0c2eba97/hadoop.tmp.dir so I do NOT create it in target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce 2024-11-08T00:35:13,133 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba, deleteOnExit=true 2024-11-08T00:35:13,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T00:35:13,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/test.cache.data in system properties and HBase conf 2024-11-08T00:35:13,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T00:35:13,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir in system properties and HBase conf 2024-11-08T00:35:13,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T00:35:13,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T00:35:13,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T00:35:13,134 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T00:35:13,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:35:13,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:35:13,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T00:35:13,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:35:13,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T00:35:13,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T00:35:13,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:35:13,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:35:13,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T00:35:13,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/nfs.dump.dir in system properties and HBase conf 2024-11-08T00:35:13,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/java.io.tmpdir in system properties and HBase conf 2024-11-08T00:35:13,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:35:13,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T00:35:13,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T00:35:13,156 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:35:13,284 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:35:13,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:13,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:13,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:13,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:13,492 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:13,499 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:13,504 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:13,504 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:13,504 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:35:13,505 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:13,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3150e6db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:13,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d790455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:13,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7982676d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/java.io.tmpdir/jetty-localhost-43561-hadoop-hdfs-3_4_1-tests_jar-_-any-17227842009868437099/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:35:13,619 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2efbdc75{HTTP/1.1, (http/1.1)}{localhost:43561} 2024-11-08T00:35:13,620 INFO [Time-limited test {}] server.Server(415): Started @107717ms 2024-11-08T00:35:13,633 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:35:13,656 INFO [regionserver/3302f0f507bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:35:13,908 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:13,912 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:13,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:13,917 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:13,917 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:35:13,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bf32f74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:13,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bb5d847{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:14,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5538b075{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/java.io.tmpdir/jetty-localhost-37531-hadoop-hdfs-3_4_1-tests_jar-_-any-12940326850226527430/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:14,028 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f87a993{HTTP/1.1, (http/1.1)}{localhost:37531} 2024-11-08T00:35:14,029 INFO [Time-limited test {}] server.Server(415): Started @108126ms 2024-11-08T00:35:14,030 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:35:14,065 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:14,069 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:14,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:14,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:14,070 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:35:14,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32403ac6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:14,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bb19ef9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:14,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@272348fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/java.io.tmpdir/jetty-localhost-33549-hadoop-hdfs-3_4_1-tests_jar-_-any-3353487194623149435/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:14,177 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10b53169{HTTP/1.1, (http/1.1)}{localhost:33549} 2024-11-08T00:35:14,177 INFO [Time-limited test {}] server.Server(415): Started @108275ms 2024-11-08T00:35:14,179 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:35:15,201 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data2/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:15,201 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data1/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:15,222 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:35:15,225 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ff44519fc33bb42 with lease ID 0xafb28d9be54f5e4e: Processing first storage report for DS-e64dccec-6029-4220-b1b3-3165180151b4 from datanode DatanodeRegistration(127.0.0.1:33655, datanodeUuid=9b02e5a7-7116-4049-af6b-7af741ab247e, infoPort=33405, infoSecurePort=0, ipcPort=35163, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:15,225 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ff44519fc33bb42 with lease ID 0xafb28d9be54f5e4e: from storage DS-e64dccec-6029-4220-b1b3-3165180151b4 node DatanodeRegistration(127.0.0.1:33655, datanodeUuid=9b02e5a7-7116-4049-af6b-7af741ab247e, infoPort=33405, infoSecurePort=0, ipcPort=35163, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:15,225 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ff44519fc33bb42 with lease ID 0xafb28d9be54f5e4e: Processing first storage report for DS-5135d9ad-4cea-4b51-adc6-935d3ab21a02 from datanode DatanodeRegistration(127.0.0.1:33655, datanodeUuid=9b02e5a7-7116-4049-af6b-7af741ab247e, infoPort=33405, infoSecurePort=0, ipcPort=35163, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:15,225 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ff44519fc33bb42 with lease ID 0xafb28d9be54f5e4e: from storage DS-5135d9ad-4cea-4b51-adc6-935d3ab21a02 node DatanodeRegistration(127.0.0.1:33655, datanodeUuid=9b02e5a7-7116-4049-af6b-7af741ab247e, infoPort=33405, infoSecurePort=0, ipcPort=35163, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:15,340 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data3/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:15,340 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data4/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:15,364 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:35:15,368 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf904dbd44fc7808b with lease ID 0xafb28d9be54f5e4f: Processing first storage report for DS-5e11058e-8356-4495-ab75-8441e34f8bac from datanode DatanodeRegistration(127.0.0.1:38985, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=39491, infoSecurePort=0, ipcPort=44935, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:15,368 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf904dbd44fc7808b with lease ID 0xafb28d9be54f5e4f: from storage DS-5e11058e-8356-4495-ab75-8441e34f8bac node DatanodeRegistration(127.0.0.1:38985, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=39491, infoSecurePort=0, ipcPort=44935, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:15,368 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf904dbd44fc7808b with lease ID 0xafb28d9be54f5e4f: Processing first storage report for DS-0713e8c4-258c-4423-bff3-95c2f3fab339 from datanode DatanodeRegistration(127.0.0.1:38985, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=39491, infoSecurePort=0, ipcPort=44935, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:15,368 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf904dbd44fc7808b with lease ID 0xafb28d9be54f5e4f: from storage DS-0713e8c4-258c-4423-bff3-95c2f3fab339 node DatanodeRegistration(127.0.0.1:38985, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=39491, infoSecurePort=0, ipcPort=44935, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:15,430 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce 2024-11-08T00:35:15,433 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/zookeeper_0, clientPort=55789, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T00:35:15,434 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55789 2024-11-08T00:35:15,434 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:15,436 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:15,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:35:15,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:35:15,447 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8 with version=8 2024-11-08T00:35:15,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase-staging 2024-11-08T00:35:15,450 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:35:15,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:15,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:15,450 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:35:15,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:15,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:35:15,450 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T00:35:15,450 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:35:15,452 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33483 2024-11-08T00:35:15,454 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33483 connecting to ZooKeeper ensemble=127.0.0.1:55789 2024-11-08T00:35:15,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:334830x0, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:35:15,510 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33483-0x10117de57140000 connected 2024-11-08T00:35:15,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:15,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:15,610 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:15,611 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8, hbase.cluster.distributed=false 2024-11-08T00:35:15,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:35:15,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33483 2024-11-08T00:35:15,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33483 2024-11-08T00:35:15,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33483 2024-11-08T00:35:15,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33483 2024-11-08T00:35:15,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33483 2024-11-08T00:35:15,633 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:35:15,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:15,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:15,633 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:35:15,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:15,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:35:15,633 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:35:15,633 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:35:15,634 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42513 2024-11-08T00:35:15,636 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42513 connecting to ZooKeeper ensemble=127.0.0.1:55789 2024-11-08T00:35:15,637 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:15,640 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:15,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425130x0, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:35:15,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:15,656 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42513-0x10117de57140001 connected 2024-11-08T00:35:15,656 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:35:15,657 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:35:15,657 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:35:15,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:35:15,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42513 2024-11-08T00:35:15,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42513 2024-11-08T00:35:15,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42513 2024-11-08T00:35:15,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42513 2024-11-08T00:35:15,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42513 2024-11-08T00:35:15,685 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3302f0f507bd:33483 2024-11-08T00:35:15,686 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3302f0f507bd,33483,1731026115449 2024-11-08T00:35:15,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:35:15,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:35:15,698 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3302f0f507bd,33483,1731026115449 2024-11-08T00:35:15,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:35:15,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,712 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:35:15,713 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3302f0f507bd,33483,1731026115449 from backup master directory 2024-11-08T00:35:15,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:35:15,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3302f0f507bd,33483,1731026115449 2024-11-08T00:35:15,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:35:15,721 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
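The preceding entries trace the usual active-master handshake over ZooKeeper: the master registers under /hbase/backup-masters, sets a watcher on /hbase/master, and deletes its backup-masters znode once it becomes active. HBase drives this through its internal ZKWatcher, ZKUtil and ActiveMasterManager classes; purely to illustrate the same ephemeral-znode-plus-watch pattern, here is a small sketch using the plain ZooKeeper client, with the quorum address and znode paths copied from the log and everything else (class name, payload, error handling) assumed.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MasterZNodeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log ("quorum=127.0.0.1:55789", baseZNode=/hbase).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55789", 30000, event ->
        // The ZKWatcher(609) lines above correspond to callbacks like this one.
        System.out.println("ZooKeeper event: " + event));

    // Ephemeral registration under backup-masters (assumes the parent znodes already exist,
    // as they do in the cluster this log describes); the node vanishes if the process dies.
    zk.create("/hbase/backup-masters/example-master", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Watch /hbase/master so we are notified when the active-master znode appears or changes.
    Stat masterStat = zk.exists("/hbase/master", true);
    System.out.println("/hbase/master exists: " + (masterStat != null));

    zk.close();
  }
}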
2024-11-08T00:35:15,721 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3302f0f507bd,33483,1731026115449
2024-11-08T00:35:15,726 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/hbase.id] with ID: 9ff58e9f-1114-45e2-8ccb-609e57d4527d
2024-11-08T00:35:15,726 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/.tmp/hbase.id
2024-11-08T00:35:15,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741826_1002 (size=42)
2024-11-08T00:35:15,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741826_1002 (size=42)
2024-11-08T00:35:15,733 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/.tmp/hbase.id]:[hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/hbase.id]
2024-11-08T00:35:15,749 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-08T00:35:15,749 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-08T00:35:15,751 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
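The region.MasterRegion entries that follow dump the full schema of the local 'master:store' region, including an 'info' family declared with VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true' and an 8 KB block size. For readers unused to that dump format, the sketch here shows how an equivalent column family could be expressed with the public HBase client builders; it is illustrative only (the table name and the choice of builder calls are assumptions), not the code the master actually runs.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreSchemaSketch {
  public static void main(String[] args) {
    // Equivalent of the 'info' family printed in the descriptor dump below:
    // 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8192-byte blocks.
    TableDescriptor descriptor = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        .build();
    System.out.println(descriptor);
  }
}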
2024-11-08T00:35:15,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:35:15,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:35:15,774 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:35:15,775 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T00:35:15,775 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:35:15,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:35:15,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:35:15,785 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store 2024-11-08T00:35:15,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:35:15,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:35:15,793 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:15,793 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:35:15,793 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:15,793 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:15,793 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:35:15,793 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:35:15,794 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T00:35:15,794 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026115793Disabling compacts and flushes for region at 1731026115793Disabling writes for close at 1731026115793Writing region close event to WAL at 1731026115793Closed at 1731026115793 2024-11-08T00:35:15,795 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/.initializing 2024-11-08T00:35:15,795 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449 2024-11-08T00:35:15,798 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C33483%2C1731026115449, suffix=, logDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449, archiveDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/oldWALs, maxLogs=10 2024-11-08T00:35:15,799 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C33483%2C1731026115449.1731026115799 2024-11-08T00:35:15,804 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 2024-11-08T00:35:15,805 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33405:33405),(127.0.0.1/127.0.0.1:39491:39491)] 2024-11-08T00:35:15,807 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:35:15,808 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:15,808 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,808 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,809 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T00:35:15,811 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:15,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:15,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T00:35:15,814 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:15,815 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:35:15,815 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T00:35:15,817 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:15,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:35:15,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T00:35:15,819 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:15,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:35:15,820 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,821 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,822 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,823 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,823 DEBUG [master/3302f0f507bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,824 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:35:15,825 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:35:15,828 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:35:15,828 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716672, jitterRate=-0.08870565891265869}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:35:15,830 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731026115808Initializing all the Stores at 1731026115809 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026115809Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026115809Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026115809Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026115809Cleaning up temporary data from old regions at 1731026115823 (+14 ms)Region opened successfully at 1731026115830 (+7 ms) 2024-11-08T00:35:15,830 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T00:35:15,834 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c303831, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:35:15,835 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T00:35:15,836 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T00:35:15,836 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T00:35:15,836 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T00:35:15,837 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-08T00:35:15,837 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T00:35:15,837 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T00:35:15,840 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T00:35:15,841 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T00:35:15,847 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T00:35:15,848 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T00:35:15,848 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T00:35:15,858 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T00:35:15,858 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T00:35:15,859 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T00:35:15,868 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T00:35:15,870 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T00:35:15,879 DEBUG 
[master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T00:35:15,882 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T00:35:15,889 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T00:35:15,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:15,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:15,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,900 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3302f0f507bd,33483,1731026115449, sessionid=0x10117de57140000, setting cluster-up flag (Was=false) 2024-11-08T00:35:15,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,953 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T00:35:15,954 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,33483,1731026115449 2024-11-08T00:35:15,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:15,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:16,005 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T00:35:16,007 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,33483,1731026115449 2024-11-08T00:35:16,008 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T00:35:16,011 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T00:35:16,011 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T00:35:16,012 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-08T00:35:16,012 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3302f0f507bd,33483,1731026115449 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T00:35:16,014 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:35:16,014 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:35:16,014 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:35:16,014 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:35:16,015 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3302f0f507bd:0, corePoolSize=10, maxPoolSize=10 2024-11-08T00:35:16,015 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,015 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:35:16,015 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-08T00:35:16,016 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731026146016 2024-11-08T00:35:16,016 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T00:35:16,016 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T00:35:16,016 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T00:35:16,016 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T00:35:16,016 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T00:35:16,016 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T00:35:16,017 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,017 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T00:35:16,017 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:35:16,017 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T00:35:16,017 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T00:35:16,017 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T00:35:16,018 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T00:35:16,018 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T00:35:16,018 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026116018,5,FailOnTimeoutGroup] 2024-11-08T00:35:16,018 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026116018,5,FailOnTimeoutGroup] 2024-11-08T00:35:16,018 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,018 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T00:35:16,018 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,018 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,019 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,019 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T00:35:16,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:35:16,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:35:16,030 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T00:35:16,031 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8 2024-11-08T00:35:16,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:35:16,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:35:16,038 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:16,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:35:16,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:35:16,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:16,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:35:16,043 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:35:16,043 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:16,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:35:16,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:35:16,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:16,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:35:16,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:35:16,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:16,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:35:16,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740 2024-11-08T00:35:16,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740 2024-11-08T00:35:16,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:35:16,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:35:16,052 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
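The FlushLargeStoresPolicy messages above (for both master:store and hbase:meta) note that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set, so the lower bound falls back to the region's memstore flush heap size divided by the number of families (32.0 M and 16.0 M respectively). Below is a minimal sketch of how that key could be set explicitly through the standard Hadoop/HBase Configuration API; the 16 MB value is illustrative and not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushPolicyConfigSketch {
      public static void main(String[] args) {
        // Starts from hbase-default.xml / hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Region-wide memstore flush size (stock default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // Explicit per-column-family flush lower bound; when unset, HBase derives it
        // as memstore flush heap size / number of families, as logged above.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            16L * 1024 * 1024);

        System.out.println(
            conf.get("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }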
2024-11-08T00:35:16,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:35:16,056 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:35:16,056 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752776, jitterRate=-0.04279685020446777}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:35:16,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731026116038Initializing all the Stores at 1731026116039 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026116039Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026116039Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026116039Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026116039Cleaning up temporary data from old regions at 1731026116051 (+12 ms)Region opened successfully at 1731026116057 (+6 ms) 2024-11-08T00:35:16,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:35:16,057 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:35:16,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:35:16,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:35:16,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:35:16,058 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:35:16,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026116057Disabling compacts and flushes for region at 1731026116057Disabling writes for close at 1731026116057Writing region 
close event to WAL at 1731026116058 (+1 ms)Closed at 1731026116058 2024-11-08T00:35:16,060 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:35:16,060 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T00:35:16,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T00:35:16,061 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:35:16,063 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T00:35:16,075 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(746): ClusterId : 9ff58e9f-1114-45e2-8ccb-609e57d4527d 2024-11-08T00:35:16,075 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:35:16,088 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:35:16,088 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:35:16,101 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:35:16,102 DEBUG [RS:0;3302f0f507bd:42513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@772d34e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:35:16,119 DEBUG [RS:0;3302f0f507bd:42513 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3302f0f507bd:42513 2024-11-08T00:35:16,119 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:35:16,119 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:35:16,119 DEBUG [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(832): About to register with Master. 
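At this point the master is active, InitMetaProcedure has queued the hbase:meta assignment, and the region server is about to report for duty. A client outside the cluster could confirm the same state by connecting through the ZooKeeper quorum shown in this log (127.0.0.1:55789, base znode /hbase). The sketch below uses the public HBase 2.x/3.x client API; the quorum, port, and znode values are copied from this run and would differ elsewhere.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Values taken from this test run's log; a real deployment would read
        // them from hbase-site.xml instead of hard-coding.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "55789");
        conf.set("zookeeper.znode.parent", "/hbase");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("Active master: " + metrics.getMasterName());
          System.out.println("Live region servers: "
              + metrics.getLiveServerMetrics().size());
        }
      }
    }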
2024-11-08T00:35:16,120 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,33483,1731026115449 with port=42513, startcode=1731026115632 2024-11-08T00:35:16,120 DEBUG [RS:0;3302f0f507bd:42513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:35:16,125 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57577, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:35:16,126 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33483 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,126 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33483 {}] master.ServerManager(517): Registering regionserver=3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,128 DEBUG [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8 2024-11-08T00:35:16,128 DEBUG [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42193 2024-11-08T00:35:16,128 DEBUG [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:35:16,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:35:16,140 DEBUG [RS:0;3302f0f507bd:42513 {}] zookeeper.ZKUtil(111): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,140 WARN [RS:0;3302f0f507bd:42513 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:35:16,140 INFO [RS:0;3302f0f507bd:42513 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:35:16,140 DEBUG [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,140 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3302f0f507bd,42513,1731026115632] 2024-11-08T00:35:16,144 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:35:16,148 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:35:16,149 INFO [RS:0;3302f0f507bd:42513 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:35:16,149 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
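The registration above creates the region server's ephemeral znode under /hbase/rs, and later entries watch /hbase/meta-region-server for the meta location. The following sketch inspects those same znodes with the plain ZooKeeper client rather than HBase's internal ZKUtil/ZKWatcher classes; the paths and connect string are taken from this log, everything else is illustrative.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeInspectSketch {
      public static void main(String[] args) throws Exception {
        // Connect string copied from this run; no watcher logic needed here.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55789", 30_000,
            (WatchedEvent event) -> { /* ignore session events */ });
        try {
          // Ephemeral region-server registrations,
          // e.g. 3302f0f507bd,42513,1731026115632.
          List<String> regionServers = zk.getChildren("/hbase/rs", false);
          System.out.println("Registered region servers: " + regionServers);

          // Present only once hbase:meta has been assigned (later in this log).
          Stat metaLocation = zk.exists("/hbase/meta-region-server", false);
          System.out.println("meta-region-server znode exists: "
              + (metaLocation != null));
        } finally {
          zk.close();
        }
      }
    }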
2024-11-08T00:35:16,149 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:35:16,150 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:35:16,150 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,150 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,150 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,150 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,150 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,150 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,150 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:35:16,150 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,151 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,151 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,151 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,151 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,151 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:16,151 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:35:16,151 DEBUG [RS:0;3302f0f507bd:42513 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:35:16,151 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
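The CompactionConfiguration entries earlier in the log (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, major period 604800000 ms with 0.5 jitter) come from standard configuration keys, and the CompactionChecker chore enabled here re-evaluates stores against them every second (PT1S). Below is a sketch of setting the corresponding keys programmatically; the key names are the stock hbase-site.xml ones and the values simply mirror what this run logged.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Files below this size are always eligible for minor compaction (128 MB).
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);

        // Select between 3 and 10 store files per compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Ratio used by the exploring/ratio-based file selection.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

        // Major compaction every 7 days, with +/-50% jitter.
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
      }
    }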
2024-11-08T00:35:16,151 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,151 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,151 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,152 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,152 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,42513,1731026115632-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:35:16,193 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:35:16,193 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,42513,1731026115632-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,193 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,193 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.Replication(171): 3302f0f507bd,42513,1731026115632 started 2024-11-08T00:35:16,210 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,210 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1482): Serving as 3302f0f507bd,42513,1731026115632, RpcServer on 3302f0f507bd/172.17.0.3:42513, sessionid=0x10117de57140001 2024-11-08T00:35:16,210 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:35:16,210 DEBUG [RS:0;3302f0f507bd:42513 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,210 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,42513,1731026115632' 2024-11-08T00:35:16,210 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:35:16,211 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:35:16,211 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:35:16,211 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:35:16,211 DEBUG [RS:0;3302f0f507bd:42513 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,211 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,42513,1731026115632' 2024-11-08T00:35:16,211 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:35:16,212 DEBUG 
[RS:0;3302f0f507bd:42513 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:35:16,212 DEBUG [RS:0;3302f0f507bd:42513 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:35:16,212 INFO [RS:0;3302f0f507bd:42513 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T00:35:16,212 INFO [RS:0;3302f0f507bd:42513 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T00:35:16,213 WARN [3302f0f507bd:33483 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T00:35:16,315 INFO [RS:0;3302f0f507bd:42513 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C42513%2C1731026115632, suffix=, logDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632, archiveDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs, maxLogs=32 2024-11-08T00:35:16,317 INFO [RS:0;3302f0f507bd:42513 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42513%2C1731026115632.1731026116317 2024-11-08T00:35:16,325 INFO [RS:0;3302f0f507bd:42513 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 2024-11-08T00:35:16,327 DEBUG [RS:0;3302f0f507bd:42513 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39491:39491),(127.0.0.1/127.0.0.1:33405:33405)] 2024-11-08T00:35:16,463 DEBUG [3302f0f507bd:33483 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-08T00:35:16,464 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,465 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,42513,1731026115632, state=OPENING 2024-11-08T00:35:16,510 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T00:35:16,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:16,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:16,522 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:35:16,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:35:16,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:35:16,522 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,42513,1731026115632}] 2024-11-08T00:35:16,677 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:35:16,680 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39227, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:35:16,686 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T00:35:16,686 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:35:16,689 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C42513%2C1731026115632.meta, suffix=.meta, logDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632, archiveDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs, maxLogs=32 2024-11-08T00:35:16,690 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta 2024-11-08T00:35:16,696 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta 2024-11-08T00:35:16,697 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39491:39491),(127.0.0.1/127.0.0.1:33405:33405)] 2024-11-08T00:35:16,698 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:35:16,698 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T00:35:16,698 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T00:35:16,699 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
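The meta table descriptor above carries coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', and the open handler confirms the endpoint was loaded from the table descriptor. For a user table, the same kind of attachment can be expressed with the public descriptor builder API; this is a hedged sketch with an assumed table name ("demo"), not how hbase:meta itself is built (FSTableDescriptors handles that internally, as logged above).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical table; the endpoint class is the one named in the log.
        TableDescriptor descriptor = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor(
                "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();

        System.out.println(descriptor);
      }
    }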
2024-11-08T00:35:16,699 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T00:35:16,699 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:16,699 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T00:35:16,699 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T00:35:16,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:35:16,702 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:35:16,703 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:16,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:35:16,705 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:35:16,705 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:16,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:35:16,706 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:35:16,707 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,707 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:35:16,707 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:35:16,708 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:35:16,708 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:16,709 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
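Each store opened above reports its per-family attributes from the descriptor: ROW_INDEX_V1 data block encoding, ROWCOL Bloom filters, 8 KB blocks, and in-memory caching for the meta families. The sketch below expresses those same attributes with the public builder API, again for an assumed user-table family rather than hbase:meta itself.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnFamilySketch {
      public static void main(String[] args) {
        // Mirrors the attributes logged for the 'info' family of hbase:meta.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)   // 8192 B (8KB)
            .setInMemory(true)
            .setMaxVersions(3)
            .build();

        System.out.println(info);
      }
    }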
2024-11-08T00:35:16,709 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:35:16,710 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740 2024-11-08T00:35:16,711 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740 2024-11-08T00:35:16,713 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:35:16,713 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:35:16,714 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-08T00:35:16,715 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:35:16,716 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730847, jitterRate=-0.07068052887916565}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:35:16,716 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T00:35:16,717 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731026116699Writing region info on filesystem at 1731026116699Initializing all the Stores at 1731026116700 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026116700Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026116701 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026116701Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026116701Cleaning up temporary data from old regions at 1731026116713 (+12 ms)Running coprocessor post-open hooks at 1731026116716 (+3 ms)Region opened successfully at 1731026116717 (+1 ms) 2024-11-08T00:35:16,718 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731026116676 2024-11-08T00:35:16,721 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T00:35:16,721 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T00:35:16,722 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,723 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,42513,1731026115632, state=OPEN 2024-11-08T00:35:16,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:35:16,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:35:16,780 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3302f0f507bd,42513,1731026115632 2024-11-08T00:35:16,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:35:16,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:35:16,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T00:35:16,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,42513,1731026115632 in 258 msec 2024-11-08T00:35:16,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T00:35:16,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 726 msec 2024-11-08T00:35:16,791 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:35:16,792 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T00:35:16,793 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:35:16,793 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,42513,1731026115632, seqNum=-1] 2024-11-08T00:35:16,794 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:35:16,795 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48179, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:35:16,801 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 790 msec 2024-11-08T00:35:16,802 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731026116802, completionTime=-1 2024-11-08T00:35:16,802 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-08T00:35:16,802 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T00:35:16,804 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-08T00:35:16,804 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731026176804 2024-11-08T00:35:16,804 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731026236804 2024-11-08T00:35:16,804 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-08T00:35:16,804 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33483,1731026115449-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,804 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33483,1731026115449-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,804 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33483,1731026115449-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,805 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3302f0f507bd:33483, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:16,805 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,805 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:16,807 DEBUG [master/3302f0f507bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T00:35:16,809 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.088sec 2024-11-08T00:35:16,810 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T00:35:16,810 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T00:35:16,810 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T00:35:16,810 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-08T00:35:16,810 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T00:35:16,810 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33483,1731026115449-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:35:16,810 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33483,1731026115449-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T00:35:16,812 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T00:35:16,813 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T00:35:16,813 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,33483,1731026115449-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:16,893 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ee117d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:35:16,893 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3302f0f507bd,33483,-1 for getting cluster id 2024-11-08T00:35:16,893 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T00:35:16,896 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9ff58e9f-1114-45e2-8ccb-609e57d4527d' 2024-11-08T00:35:16,896 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T00:35:16,897 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9ff58e9f-1114-45e2-8ccb-609e57d4527d" 2024-11-08T00:35:16,897 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@759ddb62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:35:16,897 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3302f0f507bd,33483,-1] 2024-11-08T00:35:16,898 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T00:35:16,898 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:16,900 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48800, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T00:35:16,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@627eec4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:35:16,901 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:35:16,903 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,42513,1731026115632, seqNum=-1] 2024-11-08T00:35:16,903 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:35:16,905 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37832, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:35:16,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3302f0f507bd,33483,1731026115449 2024-11-08T00:35:16,908 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:16,911 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-08T00:35:16,929 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:35:16,929 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:16,929 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:16,929 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:35:16,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:35:16,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:35:16,930 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:35:16,930 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:35:16,931 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39151 2024-11-08T00:35:16,932 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39151 connecting to ZooKeeper ensemble=127.0.0.1:55789 2024-11-08T00:35:16,933 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:16,935 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:35:16,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391510x0, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:35:16,953 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:391510x0, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-08T00:35:16,953 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-08T00:35:16,953 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39151-0x10117de57140002 connected 2024-11-08T00:35:16,954 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:35:16,955 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:35:16,956 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:35:16,958 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:35:16,964 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39151 2024-11-08T00:35:16,965 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39151 2024-11-08T00:35:16,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39151 2024-11-08T00:35:16,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39151 2024-11-08T00:35:16,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39151 2024-11-08T00:35:16,967 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(746): ClusterId : 9ff58e9f-1114-45e2-8ccb-609e57d4527d 2024-11-08T00:35:16,967 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:35:16,974 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:35:16,974 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:35:16,985 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:35:16,986 DEBUG [RS:1;3302f0f507bd:39151 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cd7787e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:35:17,003 DEBUG [RS:1;3302f0f507bd:39151 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3302f0f507bd:39151 2024-11-08T00:35:17,003 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:35:17,003 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:35:17,003 DEBUG [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-08T00:35:17,004 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,33483,1731026115449 with port=39151, startcode=1731026116929 2024-11-08T00:35:17,004 DEBUG [RS:1;3302f0f507bd:39151 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:35:17,006 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43037, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:35:17,007 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33483 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3302f0f507bd,39151,1731026116929 2024-11-08T00:35:17,007 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33483 {}] master.ServerManager(517): Registering regionserver=3302f0f507bd,39151,1731026116929 2024-11-08T00:35:17,009 DEBUG [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8 2024-11-08T00:35:17,009 DEBUG [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42193 2024-11-08T00:35:17,009 DEBUG [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:35:17,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:35:17,093 DEBUG [RS:1;3302f0f507bd:39151 {}] zookeeper.ZKUtil(111): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3302f0f507bd,39151,1731026116929 2024-11-08T00:35:17,093 WARN [RS:1;3302f0f507bd:39151 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:35:17,094 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3302f0f507bd,39151,1731026116929] 2024-11-08T00:35:17,094 INFO [RS:1;3302f0f507bd:39151 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:35:17,094 DEBUG [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929 2024-11-08T00:35:17,100 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:35:17,103 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:35:17,104 INFO [RS:1;3302f0f507bd:39151 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:35:17,104 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:17,104 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:35:17,105 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:35:17,105 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:17,106 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,106 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,106 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,106 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,106 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,106 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:35:17,106 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,106 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,107 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,107 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,107 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,107 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:35:17,107 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:35:17,107 DEBUG [RS:1;3302f0f507bd:39151 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:35:17,108 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-08T00:35:17,108 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:17,108 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:17,108 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:17,108 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:17,108 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39151,1731026116929-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:35:17,125 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:35:17,125 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39151,1731026116929-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:17,126 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:17,126 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.Replication(171): 3302f0f507bd,39151,1731026116929 started 2024-11-08T00:35:17,141 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:35:17,142 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(1482): Serving as 3302f0f507bd,39151,1731026116929, RpcServer on 3302f0f507bd/172.17.0.3:39151, sessionid=0x10117de57140002 2024-11-08T00:35:17,142 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:35:17,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;3302f0f507bd:39151,5,FailOnTimeoutGroup] 2024-11-08T00:35:17,142 DEBUG [RS:1;3302f0f507bd:39151 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3302f0f507bd,39151,1731026116929 2024-11-08T00:35:17,142 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,39151,1731026116929' 2024-11-08T00:35:17,142 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:35:17,142 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-08T00:35:17,142 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:35:17,142 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T00:35:17,143 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:35:17,143 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:35:17,143 DEBUG [RS:1;3302f0f507bd:39151 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
3302f0f507bd,39151,1731026116929 2024-11-08T00:35:17,143 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,39151,1731026116929' 2024-11-08T00:35:17,143 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:35:17,143 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:35:17,143 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 3302f0f507bd,33483,1731026115449 2024-11-08T00:35:17,143 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6441e52d 2024-11-08T00:35:17,144 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T00:35:17,144 DEBUG [RS:1;3302f0f507bd:39151 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:35:17,144 INFO [RS:1;3302f0f507bd:39151 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T00:35:17,144 INFO [RS:1;3302f0f507bd:39151 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T00:35:17,145 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48808, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T00:35:17,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33483 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-08T00:35:17,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33483 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-08T00:35:17,146 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33483 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:35:17,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33483 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-08T00:35:17,149 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:35:17,150 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:17,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33483 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-08T00:35:17,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33483 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:35:17,151 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:35:17,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741835_1011 (size=393) 2024-11-08T00:35:17,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741835_1011 (size=393) 2024-11-08T00:35:17,160 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b1f95ba699b0327bbfb9ea73b592e2f6, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8 2024-11-08T00:35:17,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33655 is added to blk_1073741836_1012 (size=76) 2024-11-08T00:35:17,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38985 is added to blk_1073741836_1012 (size=76) 2024-11-08T00:35:17,167 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:17,167 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing b1f95ba699b0327bbfb9ea73b592e2f6, disabling compactions & flushes 2024-11-08T00:35:17,168 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:17,168 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:17,168 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. after waiting 0 ms 2024-11-08T00:35:17,168 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:17,168 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:17,168 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for b1f95ba699b0327bbfb9ea73b592e2f6: Waiting for close lock at 1731026117167Disabling compacts and flushes for region at 1731026117167Disabling writes for close at 1731026117168 (+1 ms)Writing region close event to WAL at 1731026117168Closed at 1731026117168 2024-11-08T00:35:17,169 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:35:17,170 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731026117169"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026117169"}]},"ts":"1731026117169"} 2024-11-08T00:35:17,172 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-08T00:35:17,174 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:35:17,174 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026117174"}]},"ts":"1731026117174"} 2024-11-08T00:35:17,177 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-08T00:35:17,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b1f95ba699b0327bbfb9ea73b592e2f6, ASSIGN}] 2024-11-08T00:35:17,179 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b1f95ba699b0327bbfb9ea73b592e2f6, ASSIGN 2024-11-08T00:35:17,181 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b1f95ba699b0327bbfb9ea73b592e2f6, ASSIGN; state=OFFLINE, location=3302f0f507bd,42513,1731026115632; forceNewPlan=false, retain=false 2024-11-08T00:35:17,246 INFO [RS:1;3302f0f507bd:39151 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C39151%2C1731026116929, suffix=, logDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929, archiveDir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs, maxLogs=32 2024-11-08T00:35:17,246 INFO [RS:1;3302f0f507bd:39151 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C39151%2C1731026116929.1731026117246 2024-11-08T00:35:17,252 INFO [RS:1;3302f0f507bd:39151 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 2024-11-08T00:35:17,256 DEBUG [RS:1;3302f0f507bd:39151 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33405:33405),(127.0.0.1/127.0.0.1:39491:39491)] 2024-11-08T00:35:17,332 INFO [3302f0f507bd:33483 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-08T00:35:17,332 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b1f95ba699b0327bbfb9ea73b592e2f6, regionState=OPENING, regionLocation=3302f0f507bd,42513,1731026115632 2024-11-08T00:35:17,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b1f95ba699b0327bbfb9ea73b592e2f6, ASSIGN because future has completed 2024-11-08T00:35:17,336 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b1f95ba699b0327bbfb9ea73b592e2f6, server=3302f0f507bd,42513,1731026115632}] 2024-11-08T00:35:17,494 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:17,494 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b1f95ba699b0327bbfb9ea73b592e2f6, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:35:17,495 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,495 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:35:17,495 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,495 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,497 INFO [StoreOpener-b1f95ba699b0327bbfb9ea73b592e2f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,498 INFO [StoreOpener-b1f95ba699b0327bbfb9ea73b592e2f6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b1f95ba699b0327bbfb9ea73b592e2f6 columnFamilyName info 2024-11-08T00:35:17,498 DEBUG [StoreOpener-b1f95ba699b0327bbfb9ea73b592e2f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:35:17,499 INFO [StoreOpener-b1f95ba699b0327bbfb9ea73b592e2f6-1 {}] regionserver.HStore(327): Store=b1f95ba699b0327bbfb9ea73b592e2f6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:35:17,499 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,500 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,500 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,500 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,500 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,502 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,505 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:35:17,505 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b1f95ba699b0327bbfb9ea73b592e2f6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839106, jitterRate=0.06697964668273926}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:35:17,505 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:17,506 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b1f95ba699b0327bbfb9ea73b592e2f6: Running coprocessor pre-open hook at 1731026117495Writing region info on filesystem at 1731026117495Initializing all the Stores at 1731026117496 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026117497 (+1 ms)Cleaning up temporary data from old regions at 1731026117500 (+3 ms)Running coprocessor post-open hooks at 1731026117505 (+5 ms)Region opened successfully at 1731026117506 (+1 ms) 2024-11-08T00:35:17,507 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6., pid=6, masterSystemTime=1731026117489 2024-11-08T00:35:17,510 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:17,510 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:17,511 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b1f95ba699b0327bbfb9ea73b592e2f6, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,42513,1731026115632 2024-11-08T00:35:17,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b1f95ba699b0327bbfb9ea73b592e2f6, server=3302f0f507bd,42513,1731026115632 because future has completed 2024-11-08T00:35:17,519 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T00:35:17,519 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b1f95ba699b0327bbfb9ea73b592e2f6, server=3302f0f507bd,42513,1731026115632 in 179 msec 2024-11-08T00:35:17,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T00:35:17,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=b1f95ba699b0327bbfb9ea73b592e2f6, ASSIGN in 342 msec 2024-11-08T00:35:17,524 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:35:17,524 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026117524"}]},"ts":"1731026117524"} 2024-11-08T00:35:17,527 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-08T00:35:17,528 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:35:17,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 382 msec 2024-11-08T00:35:22,203 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:35:22,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:22,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:22,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:22,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:35:22,243 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-08T00:35:22,244 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-08T00:35:22,245 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-08T00:35:22,245 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-08T00:35:22,245 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:35:22,245 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-08T00:35:22,246 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-08T00:35:27,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33483 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:35:27,174 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-08T00:35:27,174 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-08T00:35:27,178 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-08T00:35:27,178 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:27,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:27,204 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:27,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:27,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:27,205 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:35:27,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ab1ed71{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:27,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c141b19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:27,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@325d7ee2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/java.io.tmpdir/jetty-localhost-34459-hadoop-hdfs-3_4_1-tests_jar-_-any-9665572724381582668/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:27,328 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15751333{HTTP/1.1, (http/1.1)}{localhost:34459} 2024-11-08T00:35:27,328 INFO [Time-limited test {}] server.Server(415): Started @121425ms 2024-11-08T00:35:27,329 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:35:27,366 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:27,369 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:27,370 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:27,370 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:27,370 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:35:27,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cde9b58{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:27,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cbf28ae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:27,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@314d8ec{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/java.io.tmpdir/jetty-localhost-45373-hadoop-hdfs-3_4_1-tests_jar-_-any-12388973300250569071/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:27,486 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49584ab0{HTTP/1.1, (http/1.1)}{localhost:45373} 2024-11-08T00:35:27,486 INFO [Time-limited test {}] server.Server(415): Started @121583ms 2024-11-08T00:35:27,488 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:35:27,529 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:27,532 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:27,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:27,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:27,533 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:35:27,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37673872{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:27,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@92842f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:27,644 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@603e24c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/java.io.tmpdir/jetty-localhost-33767-hadoop-hdfs-3_4_1-tests_jar-_-any-3517903295021391758/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:27,644 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33f262d4{HTTP/1.1, (http/1.1)}{localhost:33767} 2024-11-08T00:35:27,644 INFO [Time-limited test {}] server.Server(415): Started @121741ms 2024-11-08T00:35:27,645 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:35:29,051 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:29,051 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:29,074 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:35:29,076 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6797accc9802ab7 with lease ID 0xafb28d9be54f5e50: Processing first storage report for DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6 from datanode DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:29,076 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6797accc9802ab7 with lease ID 0xafb28d9be54f5e50: from storage DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6 node DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:29,076 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6797accc9802ab7 with lease ID 0xafb28d9be54f5e50: Processing first storage report for DS-ff53be61-ac02-4be2-9d64-f8a854b0a71a from datanode DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:29,076 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6797accc9802ab7 with lease ID 0xafb28d9be54f5e50: from storage DS-ff53be61-ac02-4be2-9d64-f8a854b0a71a node DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:29,231 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data7/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:29,231 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data8/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:29,250 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:35:29,253 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8276981ab05b32ad with lease ID 0xafb28d9be54f5e51: Processing first storage report for DS-6d4ce634-4f50-4215-ac28-47265c61cdfe from datanode DatanodeRegistration(127.0.0.1:36123, datanodeUuid=2d4bebcb-cccc-4031-93f0-312d464b762f, infoPort=41445, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:29,253 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8276981ab05b32ad with lease ID 0xafb28d9be54f5e51: from storage DS-6d4ce634-4f50-4215-ac28-47265c61cdfe node DatanodeRegistration(127.0.0.1:36123, datanodeUuid=2d4bebcb-cccc-4031-93f0-312d464b762f, infoPort=41445, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:29,253 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8276981ab05b32ad with lease ID 0xafb28d9be54f5e51: Processing first storage report for DS-2b4531fe-9485-457f-949f-af3818bc87ea from datanode DatanodeRegistration(127.0.0.1:36123, datanodeUuid=2d4bebcb-cccc-4031-93f0-312d464b762f, infoPort=41445, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:29,253 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8276981ab05b32ad with lease ID 0xafb28d9be54f5e51: from storage DS-2b4531fe-9485-457f-949f-af3818bc87ea node DatanodeRegistration(127.0.0.1:36123, datanodeUuid=2d4bebcb-cccc-4031-93f0-312d464b762f, infoPort=41445, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:29,284 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data10/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:29,284 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data9/current/BP-648086526-172.17.0.3-1731026113172/current, will proceed with Du for space computation calculation, 2024-11-08T00:35:29,306 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:35:29,308 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14cafe8d3e1f662 with lease ID 0xafb28d9be54f5e52: Processing first storage report for DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969 from datanode DatanodeRegistration(127.0.0.1:39015, datanodeUuid=eb89e8ff-cdbd-4b0e-b861-7aaf5c59eea8, infoPort=36951, infoSecurePort=0, ipcPort=41275, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:29,308 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14cafe8d3e1f662 with lease ID 0xafb28d9be54f5e52: from storage DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969 node DatanodeRegistration(127.0.0.1:39015, datanodeUuid=eb89e8ff-cdbd-4b0e-b861-7aaf5c59eea8, infoPort=36951, infoSecurePort=0, ipcPort=41275, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:29,308 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14cafe8d3e1f662 with lease ID 0xafb28d9be54f5e52: Processing first storage report for DS-d0bce718-ac22-43a2-9c88-2f66ae71e375 from datanode DatanodeRegistration(127.0.0.1:39015, datanodeUuid=eb89e8ff-cdbd-4b0e-b861-7aaf5c59eea8, infoPort=36951, infoSecurePort=0, ipcPort=41275, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172) 2024-11-08T00:35:29,308 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14cafe8d3e1f662 with lease ID 0xafb28d9be54f5e52: from storage DS-d0bce718-ac22-43a2-9c88-2f66ae71e375 node DatanodeRegistration(127.0.0.1:39015, datanodeUuid=eb89e8ff-cdbd-4b0e-b861-7aaf5c59eea8, infoPort=36951, infoSecurePort=0, ipcPort=41275, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:29,380 WARN [ResponseProcessor for block BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,380 WARN [ResponseProcessor for block BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:29,380 WARN [ResponseProcessor for block BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,381 WARN [ResponseProcessor for block BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,381 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 block BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:29,381 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 block BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:29,381 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 block BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 
2024-11-08T00:35:29,381 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta block BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:29,381 WARN [PacketResponder: BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38985] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:29,382 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:39662 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39662 dst: /127.0.0.1:33655 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:29,382 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1440924304_22 at /127.0.0.1:39610 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39610 dst: /127.0.0.1:33655 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:29,382 WARN [PacketResponder: BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38985] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] 
at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:29,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@272348fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:29,383 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-561431489_22 at /127.0.0.1:39692 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39692 dst: /127.0.0.1:33655 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:29,383 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1440924304_22 at /127.0.0.1:40186 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40186 dst: /127.0.0.1:38985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:29,384 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10b53169{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:29,384 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:29,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bb19ef9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:29,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32403ac6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:29,384 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:40200 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40200 dst: /127.0.0.1:38985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:29,383 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:40214 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40214 dst: /127.0.0.1:38985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:29,385 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:39652 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39652 dst: /127.0.0.1:33655 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:29,384 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-561431489_22 at /127.0.0.1:40238 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40238 dst: /127.0.0.1:38985 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:29,387 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:35:29,387 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-648086526-172.17.0.3-1731026113172 (Datanode Uuid 1be0d67d-0789-4e98-b5fc-be9cc512b3fb) service to localhost/127.0.0.1:42193 2024-11-08T00:35:29,388 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data3/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:29,388 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:35:29,388 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data4/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:29,388 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-08T00:35:29,389 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:35:29,391 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta block BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:29,391 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 block BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,391 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 block BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,391 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 block BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5538b075{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:29,392 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f87a993{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:29,392 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:29,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bb5d847{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:29,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bf32f74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:29,394 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:35:29,394 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:35:29,394 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-648086526-172.17.0.3-1731026113172 (Datanode Uuid 9b02e5a7-7116-4049-af6b-7af741ab247e) service to localhost/127.0.0.1:42193 2024-11-08T00:35:29,394 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:35:29,394 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data1/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:29,394 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data2/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:29,395 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:35:29,398 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6., hostname=3302f0f507bd,42513,1731026115632, seqNum=2] 2024-11-08T00:35:29,400 ERROR [FSHLog-0-hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8-prefix:3302f0f507bd,42513,1731026115632 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,400 WARN [FSHLog-0-hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8-prefix:3302f0f507bd,42513,1731026115632 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,400 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,400 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C42513%2C1731026115632:(num 1731026116317) roll requested 2024-11-08T00:35:29,401 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42513%2C1731026115632.1731026129401 2024-11-08T00:35:29,406 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:29,406 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:29,406 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:29,406 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:29,407 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:29,407 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026129401 2024-11-08T00:35:29,407 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,407 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:29,408 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-08T00:35:29,409 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-08T00:35:29,409 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 2024-11-08T00:35:29,411 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36951:36951),(127.0.0.1/127.0.0.1:41445:41445)] 2024-11-08T00:35:29,411 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 is not closed yet, will try archiving it next time 2024-11-08T00:35:29,412 WARN [IPC Server handler 2 on default port 42193 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-08T00:35:29,415 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 after 5ms 2024-11-08T00:35:29,776 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:31,108 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:31,412 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:31,413 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026129401 2024-11-08T00:35:31,414 WARN [ResponseProcessor for block BP-648086526-172.17.0.3-1731026113172:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-648086526-172.17.0.3-1731026113172:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:31,414 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026129401 block BP-648086526-172.17.0.3-1731026113172:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK], DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:31,414 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:33048 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39015:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33048 dst: /127.0.0.1:39015 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:31,415 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:38800 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38800 dst: /127.0.0.1:36123 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:31,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@603e24c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:31,465 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33f262d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:31,465 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:31,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@92842f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:31,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37673872{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:31,467 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-08T00:35:31,467 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:35:31,467 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:35:31,467 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-648086526-172.17.0.3-1731026113172 (Datanode Uuid eb89e8ff-cdbd-4b0e-b861-7aaf5c59eea8) service to localhost/127.0.0.1:42193 2024-11-08T00:35:31,468 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data9/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:31,468 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data10/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:31,468 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:35:31,777 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:33,109 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:33,412 WARN [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]] 2024-11-08T00:35:33,413 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:33,413 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C42513%2C1731026115632:(num 1731026129401) roll requested 2024-11-08T00:35:33,413 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42513%2C1731026115632.1731026133413 2024-11-08T00:35:33,416 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 after 4007ms 2024-11-08T00:35:33,418 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39015 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:33,418 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:51076 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data8]'}, localName='127.0.0.1:36123', datanodeUuid='2d4bebcb-cccc-4031-93f0-312d464b762f', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741839_1021 to mirror 127.0.0.1:39015 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:33,419 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:33,419 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741839_1021 2024-11-08T00:35:33,419 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:51076 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-08T00:35:33,419 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:51076 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51076 dst: /127.0.0.1:36123 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:33,423 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:33,426 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:33,426 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:33,426 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741840_1022 2024-11-08T00:35:33,427 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:33,428 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:33,428 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:33,428 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741841_1023 2024-11-08T00:35:33,429 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:33,432 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:33,432 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:33,432 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:33,432 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:33,432 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:33,433 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026129401 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026133413 2024-11-08T00:35:33,433 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41445:41445),(127.0.0.1/127.0.0.1:42245:42245)] 2024-11-08T00:35:33,433 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 is not closed yet, will try archiving it next time 2024-11-08T00:35:33,433 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026129401 is not closed yet, will try archiving it next time 2024-11-08T00:35:33,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36123 is added to blk_1073741838_1020 (size=2431) 2024-11-08T00:35:33,474 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:35:33,777 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:33,835 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 is not closed yet, will try archiving it next time 2024-11-08T00:35:35,109 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:35,270 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3cfd0faa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36123, datanodeUuid=2d4bebcb-cccc-4031-93f0-312d464b762f, infoPort=41445, infoSecurePort=0, ipcPort=46293, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741838_1020 to 127.0.0.1:33655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:35,434 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:35,479 WARN [ResponseProcessor for block BP-648086526-172.17.0.3-1731026113172:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-648086526-172.17.0.3-1731026113172:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:35,479 WARN [DataStreamer for file /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026133413 block BP-648086526-172.17.0.3-1731026113172:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:35,480 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:51086 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51086 dst: /127.0.0.1:36123 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:35,480 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36212 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36212 dst: /127.0.0.1:34377 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:35,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@314d8ec{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:35,514 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49584ab0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:35:35,514 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:35:35,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cbf28ae{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:35:35,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cde9b58{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,STOPPED} 2024-11-08T00:35:35,517 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:35:35,517 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-648086526-172.17.0.3-1731026113172 (Datanode Uuid 2d4bebcb-cccc-4031-93f0-312d464b762f) service to localhost/127.0.0.1:42193 2024-11-08T00:35:35,517 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:35:35,517 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:35:35,518 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data7/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:35,518 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:35:35,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data8/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:35:35,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42513 {}] regionserver.HRegion(8855): Flush requested on b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:35,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1f95ba699b0327bbfb9ea73b592e2f6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:35:35,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/5e10b90bede54919a84c7ff99a14fdbd is 1080, key is row0002/info:/1731026131470/Put/seqid=0 2024-11-08T00:35:35,551 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:35,552 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 
2024-11-08T00:35:35,552 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741843_1026 2024-11-08T00:35:35,552 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:35,554 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:35,554 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK], DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:35,554 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741844_1027 2024-11-08T00:35:35,555 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:35,556 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:35,556 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:35,557 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741845_1028 2024-11-08T00:35:35,558 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:35,560 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36123 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:35,560 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36228 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741846_1029 to mirror 127.0.0.1:36123 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:35,561 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:35,561 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36228 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-08T00:35:35,561 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741846_1029 2024-11-08T00:35:35,561 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36228 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36228 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:35,562 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:35,562 WARN [IPC Server handler 1 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:35,563 WARN [IPC Server handler 1 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:35,563 WARN [IPC Server handler 1 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:35,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741847_1030 (size=10347) 2024-11-08T00:35:35,777 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:35,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/5e10b90bede54919a84c7ff99a14fdbd 2024-11-08T00:35:35,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/5e10b90bede54919a84c7ff99a14fdbd as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/5e10b90bede54919a84c7ff99a14fdbd 2024-11-08T00:35:35,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/5e10b90bede54919a84c7ff99a14fdbd, entries=5, sequenceid=11, filesize=10.1 K 2024-11-08T00:35:35,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for b1f95ba699b0327bbfb9ea73b592e2f6 in 457ms, sequenceid=11, compaction requested=false 2024-11-08T00:35:35,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1f95ba699b0327bbfb9ea73b592e2f6: 2024-11-08T00:35:36,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42513 {}] regionserver.HRegion(8855): Flush requested on b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:36,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1f95ba699b0327bbfb9ea73b592e2f6 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-08T00:35:36,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4cd974cc34cc4f238120ca36f6c51ad1 is 1080, key is row0007/info:/1731026135529/Put/seqid=0 2024-11-08T00:35:36,178 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:36,178 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:36,178 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741848_1031 2024-11-08T00:35:36,179 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:36,181 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:36,181 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:36,181 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741849_1032 2024-11-08T00:35:36,182 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:36,184 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33655 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:36,184 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36244 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741850_1033 to mirror 127.0.0.1:33655 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:36,185 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:36,185 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741850_1033 2024-11-08T00:35:36,185 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36244 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-08T00:35:36,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36244 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36244 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:36,185 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:36,188 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36123 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:36,188 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:36,188 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36252 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741851_1034 to mirror 127.0.0.1:36123 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:36,188 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741851_1034 2024-11-08T00:35:36,188 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36252 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-08T00:35:36,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36252 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36252 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:36,189 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:36,190 WARN [IPC Server handler 2 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:36,190 WARN [IPC Server handler 2 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:36,190 WARN [IPC Server handler 2 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:36,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741852_1035 (size=12506) 2024-11-08T00:35:36,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4cd974cc34cc4f238120ca36f6c51ad1 2024-11-08T00:35:36,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4cd974cc34cc4f238120ca36f6c51ad1 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4cd974cc34cc4f238120ca36f6c51ad1 2024-11-08T00:35:36,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4cd974cc34cc4f238120ca36f6c51ad1, entries=7, sequenceid=24, filesize=12.2 K 2024-11-08T00:35:36,611 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for b1f95ba699b0327bbfb9ea73b592e2f6 in 440ms, sequenceid=24, compaction requested=false 2024-11-08T00:35:36,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1f95ba699b0327bbfb9ea73b592e2f6: 2024-11-08T00:35:36,611 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-08T00:35:36,611 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:36,611 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4cd974cc34cc4f238120ca36f6c51ad1 because midkey is the same as first or last row 2024-11-08T00:35:37,109 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,434 WARN [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]] 2024-11-08T00:35:37,434 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,434 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C42513%2C1731026115632:(num 1731026133413) roll requested 2024-11-08T00:35:37,435 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42513%2C1731026115632.1731026137435 2024-11-08T00:35:37,439 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,439 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:37,439 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741853_1036 2024-11-08T00:35:37,440 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:37,441 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,441 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:37,442 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741854_1037 2024-11-08T00:35:37,442 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:37,444 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,444 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:37,444 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741855_1038 2024-11-08T00:35:37,444 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:37,446 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,446 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 
2024-11-08T00:35:37,446 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741856_1039 2024-11-08T00:35:37,446 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:37,447 WARN [IPC Server handler 0 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:37,447 WARN [IPC Server handler 0 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:37,447 WARN [IPC Server handler 0 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:37,450 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:37,450 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:37,450 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:37,450 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:37,451 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:37,451 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026133413 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026137435 2024-11-08T00:35:37,452 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42245:42245)] 2024-11-08T00:35:37,452 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 is not closed yet, will try archiving it next time 2024-11-08T00:35:37,452 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026133413 is not closed yet, will try archiving it next time 2024-11-08T00:35:37,452 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026129401 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs/3302f0f507bd%2C42513%2C1731026115632.1731026129401 2024-11-08T00:35:37,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741842_1025 (size=25992) 2024-11-08T00:35:37,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42513 {}] regionserver.HRegion(8855): Flush requested on b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:37,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1f95ba699b0327bbfb9ea73b592e2f6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-08T00:35:37,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/1bc50aaa75644fa8a9d02c5e1eb07abc is 1079, key is tmprow/info:/1731026137589/Put/seqid=0 2024-11-08T00:35:37,597 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33655 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,597 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36262 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741858_1041 to mirror 127.0.0.1:33655 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:37,598 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:37,598 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36262 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-08T00:35:37,598 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741858_1041 2024-11-08T00:35:37,598 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36262 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36262 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:37,598 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:37,600 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,600 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:37,600 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741859_1042 2024-11-08T00:35:37,601 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:37,602 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,602 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:37,602 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741860_1043 2024-11-08T00:35:37,603 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:37,605 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39015 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:37,605 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36268 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741861_1044 to mirror 127.0.0.1:39015 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:37,605 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:37,605 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741861_1044 2024-11-08T00:35:37,605 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36268 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-08T00:35:37,606 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36268 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36268 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:37,606 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:37,607 WARN [IPC Server handler 1 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:37,607 WARN [IPC Server handler 1 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:37,607 WARN [IPC Server handler 1 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:37,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741862_1045 (size=6027) 2024-11-08T00:35:37,778 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:37,854 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 is not closed yet, will try archiving it next time 2024-11-08T00:35:38,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/1bc50aaa75644fa8a9d02c5e1eb07abc 2024-11-08T00:35:38,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/1bc50aaa75644fa8a9d02c5e1eb07abc as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/1bc50aaa75644fa8a9d02c5e1eb07abc 2024-11-08T00:35:38,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/1bc50aaa75644fa8a9d02c5e1eb07abc, entries=1, sequenceid=34, filesize=5.9 K 2024-11-08T00:35:38,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b1f95ba699b0327bbfb9ea73b592e2f6 in 436ms, sequenceid=34, compaction requested=true 2024-11-08T00:35:38,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1f95ba699b0327bbfb9ea73b592e2f6: 2024-11-08T00:35:38,027 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-08T00:35:38,027 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:38,027 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4cd974cc34cc4f238120ca36f6c51ad1 because midkey is the same as first or last row 2024-11-08T00:35:38,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b1f95ba699b0327bbfb9ea73b592e2f6:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:35:38,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:35:38,027 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:35:38,029 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:35:38,029 DEBUG 
[RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HStore(1541): b1f95ba699b0327bbfb9ea73b592e2f6/info is initiating minor compaction (all files) 2024-11-08T00:35:38,029 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b1f95ba699b0327bbfb9ea73b592e2f6/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:38,029 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/5e10b90bede54919a84c7ff99a14fdbd, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4cd974cc34cc4f238120ca36f6c51ad1, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/1bc50aaa75644fa8a9d02c5e1eb07abc] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp, totalSize=28.2 K 2024-11-08T00:35:38,030 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5e10b90bede54919a84c7ff99a14fdbd, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731026131470 2024-11-08T00:35:38,030 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4cd974cc34cc4f238120ca36f6c51ad1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731026135529 2024-11-08T00:35:38,031 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1bc50aaa75644fa8a9d02c5e1eb07abc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731026137589 2024-11-08T00:35:38,045 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b1f95ba699b0327bbfb9ea73b592e2f6#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:35:38,046 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/99135ae6026f431aa0d16ff93d243a7c is 1080, key is row0002/info:/1731026131470/Put/seqid=0 2024-11-08T00:35:38,048 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:38,048 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:38,048 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741863_1046 2024-11-08T00:35:38,048 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:38,050 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:38,050 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:38,050 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741864_1047 2024-11-08T00:35:38,050 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:38,052 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:38,052 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:38,052 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741865_1048 2024-11-08T00:35:38,052 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:38,055 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38985 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:38,055 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36310 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741866_1049 to mirror 127.0.0.1:38985 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:38,055 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:38,055 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741866_1049 2024-11-08T00:35:38,055 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36310 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-08T00:35:38,055 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36310 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36310 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
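The repeated `java.net.ConnectException: Connection refused` traces above come from the DFS client opening sockets to datanodes that this test has already stopped, which is why each pipeline attempt ends with the first peer being marked bad. The following JDK-only sketch reproduces that reachability failure against the loopback ports seen in the pipelines; the class and method names are hypothetical and none of this is Hadoop code.

```java
// Illustrative only: probe the datanode ports from the log and report which
// still accept a TCP connection. "Connection refused" surfaces as IOException.
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.List;

public class DataNodeProbe {
    // Returns true if a TCP connection to host:port succeeds within timeoutMs.
    static boolean isReachable(String host, int port, int timeoutMs) {
        try (Socket s = new Socket()) {
            s.connect(new InetSocketAddress(host, port), timeoutMs);
            return true;
        } catch (IOException e) {   // ConnectException ("Connection refused") lands here
            return false;
        }
    }

    public static void main(String[] args) {
        // Ports taken from the pipeline members logged above; only 34377 stays alive in this run.
        List<Integer> datanodePorts = List.of(36123, 33655, 39015, 38985, 34377);
        for (int port : datanodePorts) {
            System.out.printf("127.0.0.1:%d reachable=%b%n",
                    port, isReachable("127.0.0.1", port, 1000));
        }
    }
}
```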
2024-11-08T00:35:38,056 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:38,056 WARN [IPC Server handler 3 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:38,057 WARN [IPC Server handler 3 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:38,057 WARN [IPC Server handler 3 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:38,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741867_1050 (size=17994) 2024-11-08T00:35:38,078 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@39e0612d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741847_1030 to 127.0.0.1:39015 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
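The `BlockPlacementPolicyDefault` warnings above reduce to simple arithmetic: the writer asks for replication=2, only one datanode is still usable after the exclusions, so the namenode is "still in need of 1 to reach 2" and has nowhere to put it. The sketch below models that shortfall with made-up helper names; it is not the placement-policy API.

```java
// A model of the replica shortfall reported above, assuming one replica already
// sits on the surviving datanode (127.0.0.1:34377) and all others are excluded.
import java.util.List;
import java.util.Set;

public class PlacementShortfall {
    static int stillNeeded(int requiredReplication, int alreadyPlaced) {
        return Math.max(0, requiredReplication - alreadyPlaced);
    }

    public static void main(String[] args) {
        Set<String> excluded = Set.of("127.0.0.1:36123", "127.0.0.1:33655",
                                      "127.0.0.1:39015", "127.0.0.1:38985");
        List<String> allDataNodes = List.of("127.0.0.1:36123", "127.0.0.1:33655",
                "127.0.0.1:39015", "127.0.0.1:38985", "127.0.0.1:34377");
        long usable = allDataNodes.stream().filter(dn -> !excluded.contains(dn)).count();
        int placed = 1;  // assumption: the surviving node already holds one replica
        System.out.println("usable datanodes = " + usable);                  // 1
        System.out.println("still in need of = " + stillNeeded(2, placed));  // 1
    }
}
```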
2024-11-08T00:35:38,078 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@37279861[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741852_1035 to 127.0.0.1:39015 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:38,470 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/99135ae6026f431aa0d16ff93d243a7c as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c 2024-11-08T00:35:38,479 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b1f95ba699b0327bbfb9ea73b592e2f6/info of b1f95ba699b0327bbfb9ea73b592e2f6 into 99135ae6026f431aa0d16ff93d243a7c(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
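The compaction that completes above takes three sorted store files (28.2 K total) and rewrites them as one 17.6 K file. At its core that is a k-way merge of sorted inputs; the toy below shows that idea only, ignoring versions, sequence ids, and delete markers, and is not HBase's Compactor.

```java
// Toy k-way merge: several already-sorted "store files" (keys only) become one
// sorted output, which is what a minor compaction of all files amounts to here.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class MinorCompactionSketch {
    static List<String> compact(List<List<String>> storeFiles) {
        record Cursor(List<String> file, int idx) {}
        PriorityQueue<Cursor> heap = new PriorityQueue<>(
                Comparator.comparing((Cursor c) -> c.file().get(c.idx())));
        for (List<String> f : storeFiles) {
            if (!f.isEmpty()) heap.add(new Cursor(f, 0));
        }
        List<String> merged = new ArrayList<>();
        while (!heap.isEmpty()) {
            Cursor c = heap.poll();
            merged.add(c.file().get(c.idx()));
            if (c.idx() + 1 < c.file().size()) heap.add(new Cursor(c.file(), c.idx() + 1));
        }
        return merged;  // one sorted output replaces the three inputs
    }

    public static void main(String[] args) {
        // The row values are placeholders, not the actual contents of the HFiles above.
        List<String> out = compact(List.of(
                List.of("row0002", "row0005"),   // stands in for 5e10b90b... (seqNum<=11)
                List.of("row0007", "row0010"),   // stands in for 4cd974cc... (seqNum<=24)
                List.of("row0012")));            // stands in for 1bc50aaa... (seqNum<=34)
        System.out.println(out);
    }
}
```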
2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b1f95ba699b0327bbfb9ea73b592e2f6: 2024-11-08T00:35:38,479 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6., storeName=b1f95ba699b0327bbfb9ea73b592e2f6/info, priority=13, startTime=1731026138027; duration=0sec 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c because midkey is the same as first or last row 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c because midkey is the same as first or last row 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c because midkey is the same as first or last row 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:35:38,479 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b1f95ba699b0327bbfb9ea73b592e2f6:info 2024-11-08T00:35:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42513 {}] regionserver.HRegion(8855): Flush requested on b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:39,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1f95ba699b0327bbfb9ea73b592e2f6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-08T00:35:39,017 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4de4937f0fd644f3b3b9a6bcaf7f1167 is 1079, key is tmprow/info:/1731026139011/Put/seqid=0 2024-11-08T00:35:39,019 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:39,020 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:39,020 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741868_1051 2024-11-08T00:35:39,020 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:39,022 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:39,022 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:39,022 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741869_1052 2024-11-08T00:35:39,023 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:39,024 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:39,024 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:39,024 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741870_1053 2024-11-08T00:35:39,025 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:39,026 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:39,026 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:39,026 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741871_1054 2024-11-08T00:35:39,027 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:39,028 WARN [IPC Server handler 4 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:39,028 WARN [IPC Server handler 4 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:39,028 WARN [IPC Server handler 4 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:39,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741872_1055 (size=6027) 2024-11-08T00:35:39,077 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@37279861[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741842_1025 to 127.0.0.1:39015 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:39,077 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@39e0612d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741862_1045 to 127.0.0.1:36123 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:39,110 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:39,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4de4937f0fd644f3b3b9a6bcaf7f1167 2024-11-08T00:35:39,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4de4937f0fd644f3b3b9a6bcaf7f1167 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4de4937f0fd644f3b3b9a6bcaf7f1167 2024-11-08T00:35:39,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4de4937f0fd644f3b3b9a6bcaf7f1167, entries=1, sequenceid=45, filesize=5.9 K 2024-11-08T00:35:39,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b1f95ba699b0327bbfb9ea73b592e2f6 in 436ms, sequenceid=45, compaction requested=false 2024-11-08T00:35:39,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1f95ba699b0327bbfb9ea73b592e2f6: 2024-11-08T00:35:39,449 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-08T00:35:39,449 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:39,449 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c because midkey is the same as first or last row 2024-11-08T00:35:39,452 WARN [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]] 2024-11-08T00:35:39,452 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
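The `FSHLog(529)` warning above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") is a low-replication guard: once the WAL's output pipeline has fewer live replicas than the configured minimum, the log roller is asked to close the current file and start a new one on a fresh pipeline. The sketch below is a minimal model of that check with hypothetical names, not the FSHLog API.

```java
// Minimal sketch of the low-replication WAL-roll decision seen above.
public class WalRollCheck {
    private final int minReplication;

    WalRollCheck(int minReplication) {
        this.minReplication = minReplication;
    }

    /** @return true if the WAL should be rolled onto a fresh pipeline. */
    boolean shouldRequestRoll(int currentPipelineReplicas) {
        return currentPipelineReplicas < minReplication;
    }

    public static void main(String[] args) {
        WalRollCheck check = new WalRollCheck(2);  // "expecting no less than 2 replicas"
        int replicasInPipeline = 1;                // only 127.0.0.1:34377 is left in the pipeline
        if (check.shouldRequestRoll(replicasInPipeline)) {
            System.out.println("Requesting close of WAL; roll to a new writer");
        }
    }
}
```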
2024-11-08T00:35:39,453 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C42513%2C1731026115632:(num 1731026137435) roll requested 2024-11-08T00:35:39,453 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42513%2C1731026115632.1731026139453 2024-11-08T00:35:39,456 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:39,456 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:39,456 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741873_1056 2024-11-08T00:35:39,457 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:39,458 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:39,458 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:39,458 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741874_1057 2024-11-08T00:35:39,458 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:39,460 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:39,460 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:39,460 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741875_1058 2024-11-08T00:35:39,461 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:39,464 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39015 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
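The Thread-959 records above repeat the same abandon-and-exclude cycle: a pipeline attempt fails, the block is abandoned, the unreachable datanode is added to an exclusion set, and the client retries until only reachable nodes remain. The sketch below is a schematic of that observed behaviour, not the DataStreamer implementation; all names are made up.

```java
// Schematic exclude-and-retry loop: keep retrying pipeline construction,
// excluding each datanode that refuses the connection, until the pipeline is
// healthy or no candidates remain.
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;

public class PipelineRetrySketch {
    static List<String> buildPipeline(List<String> candidates,
                                      Predicate<String> isReachable,
                                      int wanted) {
        Set<String> excluded = new LinkedHashSet<>();
        while (true) {
            List<String> pipeline = candidates.stream()
                    .filter(dn -> !excluded.contains(dn))
                    .limit(wanted)
                    .toList();
            if (pipeline.isEmpty()) return pipeline;       // nothing usable: abort the write
            String bad = pipeline.stream()
                    .filter(isReachable.negate())
                    .findFirst().orElse(null);
            if (bad == null) return pipeline;              // pipeline is healthy
            excluded.add(bad);                             // "Excluding datanode ..."
        }
    }

    public static void main(String[] args) {
        List<String> all = List.of("127.0.0.1:38985", "127.0.0.1:36123",
                "127.0.0.1:33655", "127.0.0.1:39015", "127.0.0.1:34377");
        Set<String> alive = Set.of("127.0.0.1:34377");     // only one datanode survives
        System.out.println(buildPipeline(all, alive::contains, 2));
    }
}
```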
2024-11-08T00:35:39,464 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36332 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741876_1059 to mirror 127.0.0.1:39015 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:39,464 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:39,464 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741876_1059 2024-11-08T00:35:39,464 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36332 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-08T00:35:39,464 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36332 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36332 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:39,465 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:39,465 WARN [IPC Server handler 4 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:39,465 WARN [IPC Server handler 4 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:39,465 WARN [IPC Server handler 4 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:39,468 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:39,468 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:39,468 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:39,468 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:39,468 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:39,469 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026137435 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026139453 2024-11-08T00:35:39,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741857_1040 (size=13591) 2024-11-08T00:35:39,476 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42245:42245)] 2024-11-08T00:35:39,477 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 is not closed yet, will try archiving it next time 2024-11-08T00:35:39,477 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026137435 is not closed yet, will try archiving it next time 2024-11-08T00:35:39,477 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026133413 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs/3302f0f507bd%2C42513%2C1731026115632.1731026133413 2024-11-08T00:35:39,778 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:39,871 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 is not closed yet, will try archiving it next time 2024-11-08T00:35:40,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42513 {}] regionserver.HRegion(8855): Flush requested on b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:40,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1f95ba699b0327bbfb9ea73b592e2f6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-08T00:35:40,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4464110ba99a490385b87d51b4b441e5 is 1079, key is tmprow/info:/1731026140433/Put/seqid=0 2024-11-08T00:35:40,443 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:40,444 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK], DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:40,444 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741878_1061 2024-11-08T00:35:40,445 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:40,446 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:40,446 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:40,446 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741879_1062 2024-11-08T00:35:40,447 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:40,448 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:40,448 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:40,448 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741880_1063 2024-11-08T00:35:40,449 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:40,450 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:40,450 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 
2024-11-08T00:35:40,450 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741881_1064 2024-11-08T00:35:40,451 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:40,452 WARN [IPC Server handler 4 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:40,452 WARN [IPC Server handler 4 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:40,452 WARN [IPC Server handler 4 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:40,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741882_1065 (size=6027) 2024-11-08T00:35:40,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4464110ba99a490385b87d51b4b441e5 2024-11-08T00:35:40,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/4464110ba99a490385b87d51b4b441e5 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4464110ba99a490385b87d51b4b441e5 2024-11-08T00:35:40,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4464110ba99a490385b87d51b4b441e5, entries=1, sequenceid=55, filesize=5.9 K 2024-11-08T00:35:40,871 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for b1f95ba699b0327bbfb9ea73b592e2f6 in 436ms, sequenceid=55, compaction requested=true 2024-11-08T00:35:40,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
b1f95ba699b0327bbfb9ea73b592e2f6: 2024-11-08T00:35:40,871 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-08T00:35:40,871 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:40,871 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c because midkey is the same as first or last row 2024-11-08T00:35:40,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b1f95ba699b0327bbfb9ea73b592e2f6:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:35:40,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:35:40,871 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:35:40,873 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:35:40,873 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HStore(1541): b1f95ba699b0327bbfb9ea73b592e2f6/info is initiating minor compaction (all files) 2024-11-08T00:35:40,873 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b1f95ba699b0327bbfb9ea73b592e2f6/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 
2024-11-08T00:35:40,874 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4de4937f0fd644f3b3b9a6bcaf7f1167, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4464110ba99a490385b87d51b4b441e5] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp, totalSize=29.3 K 2024-11-08T00:35:40,874 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.Compactor(225): Compacting 99135ae6026f431aa0d16ff93d243a7c, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731026131470 2024-11-08T00:35:40,875 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4de4937f0fd644f3b3b9a6bcaf7f1167, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731026139011 2024-11-08T00:35:40,875 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4464110ba99a490385b87d51b4b441e5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731026140433 2024-11-08T00:35:40,894 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b1f95ba699b0327bbfb9ea73b592e2f6#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:35:40,894 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/efe73729f82b4d268b8a7fbe28793a66 is 1080, key is row0002/info:/1731026131470/Put/seqid=0 2024-11-08T00:35:40,896 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:40,896 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK], DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]) is bad. 2024-11-08T00:35:40,896 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741883_1066 2024-11-08T00:35:40,897 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK] 2024-11-08T00:35:40,898 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:40,898 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK], DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK]) is bad. 2024-11-08T00:35:40,898 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741884_1067 2024-11-08T00:35:40,899 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39015,DS-8fc96e2d-ca22-49d5-a21f-b7aea3ee6969,DISK] 2024-11-08T00:35:40,901 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38985 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:40,901 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36352 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741885_1068 to mirror 127.0.0.1:38985 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:40,901 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]) is bad. 2024-11-08T00:35:40,901 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36352 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-08T00:35:40,901 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741885_1068 2024-11-08T00:35:40,901 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:36352 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36352 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:40,902 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38985,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK] 2024-11-08T00:35:40,903 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:40,903 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 
2024-11-08T00:35:40,903 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741886_1069 2024-11-08T00:35:40,903 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:40,904 WARN [IPC Server handler 4 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T00:35:40,904 WARN [IPC Server handler 4 on default port 42193 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T00:35:40,904 WARN [IPC Server handler 4 on default port 42193 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T00:35:40,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741887_1070 (size=18097) 2024-11-08T00:35:40,915 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/efe73729f82b4d268b8a7fbe28793a66 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/efe73729f82b4d268b8a7fbe28793a66 2024-11-08T00:35:40,923 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b1f95ba699b0327bbfb9ea73b592e2f6/info of b1f95ba699b0327bbfb9ea73b592e2f6 into efe73729f82b4d268b8a7fbe28793a66(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-08T00:35:40,923 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b1f95ba699b0327bbfb9ea73b592e2f6: 2024-11-08T00:35:40,923 INFO [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6., storeName=b1f95ba699b0327bbfb9ea73b592e2f6/info, priority=13, startTime=1731026140871; duration=0sec 2024-11-08T00:35:40,923 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-08T00:35:40,923 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:40,923 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/efe73729f82b4d268b8a7fbe28793a66 because midkey is the same as first or last row 2024-11-08T00:35:40,923 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-08T00:35:40,923 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:40,923 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/efe73729f82b4d268b8a7fbe28793a66 because midkey is the same as first or last row 2024-11-08T00:35:40,924 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-08T00:35:40,924 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:40,924 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/efe73729f82b4d268b8a7fbe28793a66 because midkey is the same as first or last row 2024-11-08T00:35:40,924 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:35:40,924 DEBUG [RS:0;3302f0f507bd:42513-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b1f95ba699b0327bbfb9ea73b592e2f6:info 2024-11-08T00:35:41,077 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@37279861[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer 
BP-648086526-172.17.0.3-1731026113172:blk_1073741872_1055 to 127.0.0.1:36123 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:41,077 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@39e0612d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741867_1050 to 127.0.0.1:38985 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:41,110 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:41,477 WARN [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-08T00:35:41,477 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:41,663 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:35:41,668 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:35:41,673 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:35:41,673 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:35:41,673 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:35:41,674 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@557202e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:35:41,674 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b1c2893{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:35:41,779 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:41,783 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1400d7c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/java.io.tmpdir/jetty-localhost-42195-hadoop-hdfs-3_4_1-tests_jar-_-any-13660747056182557860/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:35:41,783 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@122a196d{HTTP/1.1, (http/1.1)}{localhost:42195} 2024-11-08T00:35:41,783 INFO [Time-limited test {}] server.Server(415): Started @135880ms 2024-11-08T00:35:41,784 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:35:42,077 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@37279861[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741882_1065 to 127.0.0.1:33655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:42,077 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@39e0612d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741857_1040 to 127.0.0.1:33655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:42,270 WARN [Thread-987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T00:35:42,279 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4c14605ece6322e with lease ID 0xafb28d9be54f5e53: from storage DS-5e11058e-8356-4495-ab75-8441e34f8bac node DatanodeRegistration(127.0.0.1:40309, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=38691, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T00:35:42,279 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4c14605ece6322e with lease ID 0xafb28d9be54f5e53: from storage DS-0713e8c4-258c-4423-bff3-95c2f3fab339 node DatanodeRegistration(127.0.0.1:40309, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=38691, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:35:43,110 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:43,477 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:43,779 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:44,078 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@39e0612d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34377, datanodeUuid=9f14824b-723a-48c9-a12a-a80f2a62b14a, infoPort=42245, infoSecurePort=0, ipcPort=35837, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741887_1070 to 127.0.0.1:39015 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:45,111 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:45,429 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-08T00:35:45,478 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:45,779 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:46,018 ERROR [FSHLog-0-hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData-prefix:3302f0f507bd,33483,1731026115449 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:46,018 WARN [FSHLog-0-hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData-prefix:3302f0f507bd,33483,1731026115449 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:46,019 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C33483%2C1731026115449:(num 1731026115799) roll requested 2024-11-08T00:35:46,019 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C33483%2C1731026115449.1731026146019 2024-11-08T00:35:46,027 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:46,028 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:46,028 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:46,028 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:46,028 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:46,028 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026146019 2024-11-08T00:35:46,029 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:46,029 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:46,029 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 2024-11-08T00:35:46,030 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38691:38691),(127.0.0.1/127.0.0.1:42245:42245)] 2024-11-08T00:35:46,030 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 is not closed yet, will try archiving it next time 2024-11-08T00:35:46,030 WARN [IPC Server handler 3 on default port 42193 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1006 2024-11-08T00:35:46,030 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 after 1ms 2024-11-08T00:35:47,111 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:47,478 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:49,112 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:49,479 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:50,032 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 after 4003ms 2024-11-08T00:35:51,112 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:51,479 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:35:52,295 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@54a1ee7a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:33655,null,null]) java.net.ConnectException: Call From 3302f0f507bd/172.17.0.3 to localhost:35163 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-08T00:35:52,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741833_1019 (size=455) 2024-11-08T00:35:52,436 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026116317 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs/3302f0f507bd%2C42513%2C1731026115632.1731026116317 2024-11-08T00:35:52,438 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026137435 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs/3302f0f507bd%2C42513%2C1731026115632.1731026137435 2024-11-08T00:35:53,112 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:53,275 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@65bf7c3e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40309, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=38691, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741833_1019 to 127.0.0.1:36123 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:35:53,480 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,113 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,326 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42513%2C1731026115632.1731026155326 2024-11-08T00:35:55,333 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36123 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
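[Editorial note] The createBlockOutputStream failure above is the create-time retry path: the client abandons blk_1073741889_1073, marks 127.0.0.1:36123 as excluded, and asks the NameNode for a fresh block. For pipelines that are already established (the repeated "All datanodes ... are bad" aborts), the client's behaviour is governed by the replace-datanode-on-failure settings; a hedged sketch of those keys follows (the values shown are illustrative, not necessarily what this test run used):

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineRecoveryConfSketch {
      /** Client-side knobs that decide how DataStreamer reacts when a pipeline datanode fails. */
      public static Configuration pipelineRecoveryConf() {
        Configuration conf = new Configuration();
        // Allow the client to ask for a replacement datanode instead of aborting the stream.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only requests a replacement for wider pipelines; ALWAYS and NEVER are the other policies.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If no replacement can be found, keep writing to the surviving datanodes rather than failing.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }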
2024-11-08T00:35:55,333 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1440924304_22 at /127.0.0.1:47464 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741889_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741889_1073 to mirror 127.0.0.1:36123 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:55,333 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:55,333 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741889_1073 2024-11-08T00:35:55,333 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1440924304_22 at /127.0.0.1:47464 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741889_1073] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-08T00:35:55,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1440924304_22 at /127.0.0.1:47464 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741889_1073] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47464 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:55,334 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:55,339 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,339 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,339 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,339 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,340 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,340 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026139453 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026155326 2024-11-08T00:35:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741877_1060 (size=12911) 2024-11-08T00:35:55,344 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38691:38691),(127.0.0.1/127.0.0.1:42245:42245)] 2024-11-08T00:35:55,344 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026139453 is not closed yet, will try archiving it next time 2024-11-08T00:35:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42513 {}] regionserver.HRegion(8855): Flush requested on b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:55,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b1f95ba699b0327bbfb9ea73b592e2f6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-08T00:35:55,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/222cea4eb5dc4e8299de8481e6753269 is 1080, key is row0013/info:/1731026155345/Put/seqid=0 2024-11-08T00:35:55,356 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36123 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:47482 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6]'}, localName='127.0.0.1:34377', datanodeUuid='9f14824b-723a-48c9-a12a-a80f2a62b14a', xmitsInProgress=0}:Exception transferring block BP-648086526-172.17.0.3-1731026113172:blk_1073741891_1075 to mirror 127.0.0.1:36123 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:55,356 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK], DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:55,356 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:47482 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-08T00:35:55,356 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741891_1075 2024-11-08T00:35:55,356 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1374943833_22 at /127.0.0.1:47482 [Receiving block BP-648086526-172.17.0.3-1731026113172:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:34377:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47482 dst: /127.0.0.1:34377 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:55,357 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:55,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741892_1076 (size=8190) 2024-11-08T00:35:55,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741892_1076 (size=8190) 2024-11-08T00:35:55,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/222cea4eb5dc4e8299de8481e6753269 2024-11-08T00:35:55,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/222cea4eb5dc4e8299de8481e6753269 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/222cea4eb5dc4e8299de8481e6753269 2024-11-08T00:35:55,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/222cea4eb5dc4e8299de8481e6753269, entries=3, sequenceid=66, filesize=8.0 K 2024-11-08T00:35:55,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for b1f95ba699b0327bbfb9ea73b592e2f6 in 31ms, sequenceid=66, compaction requested=false 2024-11-08T00:35:55,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b1f95ba699b0327bbfb9ea73b592e2f6: 2024-11-08T00:35:55,380 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-08T00:35:55,380 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:35:55,380 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/efe73729f82b4d268b8a7fbe28793a66 because midkey is the same as first or last row 2024-11-08T00:35:55,480 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-08T00:35:55,480 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T00:35:55,572 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T00:35:55,572 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:55,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:55,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:55,573 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T00:35:55,573 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T00:35:55,573 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=85250775, stopped=false 2024-11-08T00:35:55,573 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3302f0f507bd,33483,1731026115449 2024-11-08T00:35:55,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:55,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:55,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:35:55,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:55,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:55,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:35:55,625 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:35:55,625 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
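[Editorial note] Everything from "Shutting down minicluster" onward is the test's tearDown unwinding the cluster; the call stack shows it running from AbstractTestLogRolling.tearDown through HBaseTestingUtil.shutdownMiniCluster. A hedged JUnit sketch of that teardown pattern (only shutdownMiniCluster and shutdownMiniHBaseCluster appear in the stack above; the constructor and the startMiniCluster call are assumed by analogy with the older HBaseTestingUtility API):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class LogRollingTeardownSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        util.startMiniCluster(); // assumed API: brings up HDFS, ZooKeeper, and HBase in-process
      }

      @After
      public void tearDown() throws Exception {
        // Produces the shutdown sequence above: close the async connection, delete
        // /hbase/running in ZooKeeper, then stop the master and region servers.
        util.shutdownMiniCluster();
      }
    }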
2024-11-08T00:35:55,625 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:55,626 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:55,626 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '3302f0f507bd,42513,1731026115632' ***** 2024-11-08T00:35:55,626 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:35:55,626 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3302f0f507bd,39151,1731026116929' ***** 2024-11-08T00:35:55,626 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:35:55,626 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:55,627 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:55,627 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:35:55,627 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(878): Closing user regions 2024-11-08T00:35:55,627 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(3091): Received CLOSE for b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:55,628 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b1f95ba699b0327bbfb9ea73b592e2f6, disabling compactions & flushes 2024-11-08T00:35:55,628 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:35:55,628 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:55,628 INFO [RS:1;3302f0f507bd:39151 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:35:55,628 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 2024-11-08T00:35:55,628 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:35:55,628 INFO [RS:1;3302f0f507bd:39151 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:35:55,628 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. after waiting 0 ms 2024-11-08T00:35:55,628 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(959): stopping server 3302f0f507bd,39151,1731026116929 2024-11-08T00:35:55,629 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 
2024-11-08T00:35:55,629 INFO [RS:1;3302f0f507bd:39151 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:35:55,629 INFO [RS:1;3302f0f507bd:39151 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3302f0f507bd:39151. 2024-11-08T00:35:55,629 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing b1f95ba699b0327bbfb9ea73b592e2f6 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-08T00:35:55,629 DEBUG [RS:1;3302f0f507bd:39151 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:55,629 DEBUG [RS:1;3302f0f507bd:39151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:55,629 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(976): stopping server 3302f0f507bd,39151,1731026116929; all regions closed. 2024-11-08T00:35:55,630 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,630 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:35:55,630 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:35:55,630 INFO [RS:0;3302f0f507bd:42513 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:35:55,630 INFO [RS:0;3302f0f507bd:42513 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:35:55,630 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(959): stopping server 3302f0f507bd,42513,1731026115632 2024-11-08T00:35:55,630 INFO [RS:0;3302f0f507bd:42513 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:35:55,630 INFO [RS:0;3302f0f507bd:42513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3302f0f507bd:42513. 
2024-11-08T00:35:55,630 DEBUG [RS:0;3302f0f507bd:42513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:35:55,630 DEBUG [RS:0;3302f0f507bd:42513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:35:55,630 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T00:35:55,632 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:35:55,632 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-08T00:35:55,632 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,632 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T00:35:55,632 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,632 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,632 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,633 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-08T00:35:55,633 DEBUG [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1325): Online Regions={b1f95ba699b0327bbfb9ea73b592e2f6=TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6., 1588230740=hbase:meta,,1.1588230740} 2024-11-08T00:35:55,633 DEBUG [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b1f95ba699b0327bbfb9ea73b592e2f6 2024-11-08T00:35:55,633 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:35:55,633 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:35:55,633 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:35:55,633 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:35:55,633 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:35:55,633 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-08T00:35:55,634 ERROR [FSHLog-0-hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8-prefix:3302f0f507bd,42513,1731026115632.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,634 WARN [FSHLog-0-hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8-prefix:3302f0f507bd,42513,1731026115632.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,634 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C42513%2C1731026115632.meta:.meta(num 1731026116690) roll requested 2024-11-08T00:35:55,634 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,634 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42513%2C1731026115632.meta.1731026155634.meta 2024-11-08T00:35:55,634 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,634 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 2024-11-08T00:35:55,635 WARN [IPC Server handler 2 on default port 42193 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 has not been closed. Lease recovery is in progress. 
RecoveryId = 1077 for block blk_1073741837_1013 2024-11-08T00:35:55,635 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 after 1ms 2024-11-08T00:35:55,636 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/381bce8cc00a4e03b8c1069fef0b45cf is 1080, key is row0015/info:/1731026155349/Put/seqid=0 2024-11-08T00:35:55,637 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,637 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:55,637 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741893_1078 2024-11-08T00:35:55,638 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:55,638 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,638 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:40309,DS-5e11058e-8356-4495-ab75-8441e34f8bac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:55,638 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741894_1079 2024-11-08T00:35:55,639 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:55,645 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,645 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,646 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,646 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,646 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,646 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026155634.meta 2024-11-08T00:35:55,650 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,650 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33655,DS-e64dccec-6029-4220-b1b3-3165180151b4,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
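[Editorial note] Because the old WAL's pipeline is gone, closing it fails ("close old writer failed") and the Close-WAL-Writer thread falls back to lease recovery on the file, retrying until the NameNode reports it closed ("Failed to recover lease, attempt=0 ... after 1ms"). A hedged sketch of that retry loop against the public DistributedFileSystem API (the attempt limit and pause are illustrative, not the values RecoverLeaseFSUtils uses):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      /** Repeatedly ask the NameNode to recover the lease on a file whose writer has died. */
      public static boolean recoverLease(DistributedFileSystem dfs, Path file,
                                         int maxAttempts, long pauseMs) throws Exception {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          // true means the file has been closed and its last block length finalized.
          if (dfs.recoverLease(file)) {
            return true;
          }
          Thread.sleep(pauseMs); // block recovery (the RecoveryId above) proceeds asynchronously
        }
        return false;
      }
    }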
2024-11-08T00:35:55,650 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta 2024-11-08T00:35:55,650 WARN [IPC Server handler 4 on default port 42193 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-11-08T00:35:55,651 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38691:38691),(127.0.0.1/127.0.0.1:42245:42245)] 2024-11-08T00:35:55,651 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta is not closed yet, will try archiving it next time 2024-11-08T00:35:55,651 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta after 1ms 2024-11-08T00:35:55,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741896_1081 (size=14660) 2024-11-08T00:35:55,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741896_1081 (size=14660) 2024-11-08T00:35:55,652 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/381bce8cc00a4e03b8c1069fef0b45cf 2024-11-08T00:35:55,659 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/.tmp/info/381bce8cc00a4e03b8c1069fef0b45cf as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/381bce8cc00a4e03b8c1069fef0b45cf 2024-11-08T00:35:55,666 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/381bce8cc00a4e03b8c1069fef0b45cf, entries=9, sequenceid=78, filesize=14.3 K 2024-11-08T00:35:55,667 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, 
currentSize=0 B/0 for b1f95ba699b0327bbfb9ea73b592e2f6 in 38ms, sequenceid=78, compaction requested=true 2024-11-08T00:35:55,668 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/5e10b90bede54919a84c7ff99a14fdbd, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4cd974cc34cc4f238120ca36f6c51ad1, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/1bc50aaa75644fa8a9d02c5e1eb07abc, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4de4937f0fd644f3b3b9a6bcaf7f1167, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4464110ba99a490385b87d51b4b441e5] to archive 2024-11-08T00:35:55,669 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-08T00:35:55,670 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/info/e21f7b0d3f7f4df3a84442322942f38b is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6./info:regioninfo/1731026117511/Put/seqid=0 2024-11-08T00:35:55,671 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/5e10b90bede54919a84c7ff99a14fdbd to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/5e10b90bede54919a84c7ff99a14fdbd 2024-11-08T00:35:55,672 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:35:55,672 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-648086526-172.17.0.3-1731026113172:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK], DatanodeInfoWithStorage[127.0.0.1:34377,DS-79fac2fc-1df6-4762-84aa-e7ca600b7ca6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK]) is bad. 2024-11-08T00:35:55,672 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-648086526-172.17.0.3-1731026113172:blk_1073741897_1083 2024-11-08T00:35:55,672 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4cd974cc34cc4f238120ca36f6c51ad1 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4cd974cc34cc4f238120ca36f6c51ad1 2024-11-08T00:35:55,673 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36123,DS-6d4ce634-4f50-4215-ac28-47265c61cdfe,DISK] 2024-11-08T00:35:55,674 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/99135ae6026f431aa0d16ff93d243a7c 2024-11-08T00:35:55,676 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/1bc50aaa75644fa8a9d02c5e1eb07abc to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/1bc50aaa75644fa8a9d02c5e1eb07abc 2024-11-08T00:35:55,677 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4de4937f0fd644f3b3b9a6bcaf7f1167 to 
hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4de4937f0fd644f3b3b9a6bcaf7f1167 2024-11-08T00:35:55,679 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4464110ba99a490385b87d51b4b441e5 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/info/4464110ba99a490385b87d51b4b441e5 2024-11-08T00:35:55,679 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3302f0f507bd:33483 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-08T00:35:55,680 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5e10b90bede54919a84c7ff99a14fdbd=10347, 4cd974cc34cc4f238120ca36f6c51ad1=12506, 99135ae6026f431aa0d16ff93d243a7c=17994, 1bc50aaa75644fa8a9d02c5e1eb07abc=6027, 4de4937f0fd644f3b3b9a6bcaf7f1167=6027, 4464110ba99a490385b87d51b4b441e5=6027] 2024-11-08T00:35:55,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741898_1084 (size=7089) 2024-11-08T00:35:55,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741898_1084 (size=7089) 2024-11-08T00:35:55,688 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/info/e21f7b0d3f7f4df3a84442322942f38b 2024-11-08T00:35:55,694 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/b1f95ba699b0327bbfb9ea73b592e2f6/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-08T00:35:55,695 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 
2024-11-08T00:35:55,695 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b1f95ba699b0327bbfb9ea73b592e2f6: Waiting for close lock at 1731026155628Running coprocessor pre-close hooks at 1731026155628Disabling compacts and flushes for region at 1731026155628Disabling writes for close at 1731026155628Obtaining lock to block concurrent updates at 1731026155629 (+1 ms)Preparing flush snapshotting stores in b1f95ba699b0327bbfb9ea73b592e2f6 at 1731026155629Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1731026155629Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. at 1731026155631 (+2 ms)Flushing b1f95ba699b0327bbfb9ea73b592e2f6/info: creating writer at 1731026155631Flushing b1f95ba699b0327bbfb9ea73b592e2f6/info: appending metadata at 1731026155636 (+5 ms)Flushing b1f95ba699b0327bbfb9ea73b592e2f6/info: closing flushed file at 1731026155636Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a01c5ab: reopening flushed file at 1731026155658 (+22 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for b1f95ba699b0327bbfb9ea73b592e2f6 in 38ms, sequenceid=78, compaction requested=true at 1731026155667 (+9 ms)Writing region close event to WAL at 1731026155685 (+18 ms)Running coprocessor post-close hooks at 1731026155694 (+9 ms)Closed at 1731026155695 (+1 ms) 2024-11-08T00:35:55,695 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731026117146.b1f95ba699b0327bbfb9ea73b592e2f6. 
2024-11-08T00:35:55,712 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/ns/e6278ca804c44888932da22d45548dc5 is 43, key is default/ns:d/1731026116796/Put/seqid=0 2024-11-08T00:35:55,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741899_1085 (size=5153) 2024-11-08T00:35:55,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741899_1085 (size=5153) 2024-11-08T00:35:55,718 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/ns/e6278ca804c44888932da22d45548dc5 2024-11-08T00:35:55,742 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.1731026139453 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs/3302f0f507bd%2C42513%2C1731026115632.1731026139453 2024-11-08T00:35:55,744 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/table/15020b81b3d14e3f83d663abda14c29f is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731026117524/Put/seqid=0 2024-11-08T00:35:55,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741900_1086 (size=5424) 2024-11-08T00:35:55,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741900_1086 (size=5424) 2024-11-08T00:35:55,750 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/table/15020b81b3d14e3f83d663abda14c29f 2024-11-08T00:35:55,758 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/info/e21f7b0d3f7f4df3a84442322942f38b as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/info/e21f7b0d3f7f4df3a84442322942f38b 2024-11-08T00:35:55,765 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/info/e21f7b0d3f7f4df3a84442322942f38b, entries=10, sequenceid=11, filesize=6.9 K 2024-11-08T00:35:55,766 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/ns/e6278ca804c44888932da22d45548dc5 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/ns/e6278ca804c44888932da22d45548dc5 2024-11-08T00:35:55,774 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/ns/e6278ca804c44888932da22d45548dc5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T00:35:55,775 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/.tmp/table/15020b81b3d14e3f83d663abda14c29f as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/table/15020b81b3d14e3f83d663abda14c29f 2024-11-08T00:35:55,783 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/table/15020b81b3d14e3f83d663abda14c29f, entries=2, sequenceid=11, filesize=5.3 K 2024-11-08T00:35:55,784 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 151ms, sequenceid=11, compaction requested=false 2024-11-08T00:35:55,789 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T00:35:55,790 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:35:55,790 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:35:55,790 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026155633Running coprocessor pre-close hooks at 1731026155633Disabling compacts and flushes for region at 1731026155633Disabling writes for close at 1731026155633Obtaining lock to block concurrent updates at 1731026155633Preparing flush snapshotting stores in 1588230740 at 1731026155633Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731026155634 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731026155651 (+17 ms)Flushing 1588230740/info: creating writer at 1731026155651Flushing 1588230740/info: appending metadata at 1731026155669 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731026155669Flushing 1588230740/ns: creating writer at 1731026155695 (+26 ms)Flushing 1588230740/ns: appending metadata at 1731026155711 (+16 ms)Flushing 1588230740/ns: closing flushed file at 
1731026155711Flushing 1588230740/table: creating writer at 1731026155725 (+14 ms)Flushing 1588230740/table: appending metadata at 1731026155743 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731026155743Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1800390c: reopening flushed file at 1731026155757 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29dc6f0a: reopening flushed file at 1731026155765 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@612306b4: reopening flushed file at 1731026155774 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 151ms, sequenceid=11, compaction requested=false at 1731026155784 (+10 ms)Writing region close event to WAL at 1731026155785 (+1 ms)Running coprocessor post-close hooks at 1731026155790 (+5 ms)Closed at 1731026155790 2024-11-08T00:35:55,790 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T00:35:55,833 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(976): stopping server 3302f0f507bd,42513,1731026115632; all regions closed. 2024-11-08T00:35:55,834 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,834 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,834 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,834 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,834 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:35:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741895_1080 (size=825) 2024-11-08T00:35:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741895_1080 (size=825) 2024-11-08T00:35:56,108 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-08T00:35:56,108 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-08T00:35:56,158 INFO [regionserver/3302f0f507bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:35:56,234 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-08T00:35:56,235 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-08T00:35:57,110 INFO [regionserver/3302f0f507bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:35:57,276 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@348e1135[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40309, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=38691, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741832_1008 to 127.0.0.1:36123 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:57,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741836_1012 (size=76) 2024-11-08T00:35:57,562 INFO [master/3302f0f507bd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-08T00:35:57,562 INFO [master/3302f0f507bd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-08T00:35:58,274 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@65bf7c3e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40309, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=38691, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741828_1004 to 127.0.0.1:36123 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:58,274 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@348e1135[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40309, datanodeUuid=1be0d67d-0789-4e98-b5fc-be9cc512b3fb, infoPort=38691, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=771899493;c=1731026113172):Failed to transfer BP-648086526-172.17.0.3-1731026113172:blk_1073741826_1002 to 127.0.0.1:36123 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:35:59,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741877_1060 (size=12911) 2024-11-08T00:35:59,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:35:59,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:35:59,636 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 after 4002ms 2024-11-08T00:35:59,652 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta after 4002ms 2024-11-08T00:36:00,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:36:00,634 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-08T00:36:00,637 DEBUG [RS:1;3302f0f507bd:39151 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs 2024-11-08T00:36:00,637 INFO [RS:1;3302f0f507bd:39151 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C39151%2C1731026116929:(num 1731026117246) 2024-11-08T00:36:00,637 DEBUG [RS:1;3302f0f507bd:39151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:36:00,637 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:36:00,637 INFO [RS:1;3302f0f507bd:39151 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:36:00,637 INFO [RS:1;3302f0f507bd:39151 {}] hbase.ChoreService(370): Chore service for: regionserver/3302f0f507bd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T00:36:00,638 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T00:36:00,638 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T00:36:00,638 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:36:00,638 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-08T00:36:00,638 INFO [RS:1;3302f0f507bd:39151 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:36:00,638 INFO [RS:1;3302f0f507bd:39151 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39151 2024-11-08T00:36:00,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:00,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:00,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3302f0f507bd,39151,1731026116929 2024-11-08T00:36:00,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:36:00,708 INFO [RS:1;3302f0f507bd:39151 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:36:00,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:00,719 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3302f0f507bd,39151,1731026116929] 2024-11-08T00:36:00,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:00,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:00,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:00,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:00,729 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3302f0f507bd,39151,1731026116929 already deleted, retry=false 2024-11-08T00:36:00,729 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3302f0f507bd,39151,1731026116929 expired; onlineServers=1 2024-11-08T00:36:00,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:00,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:00,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:36:00,819 INFO [RS:1;3302f0f507bd:39151 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:36:00,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39151-0x10117de57140002, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:36:00,819 INFO [RS:1;3302f0f507bd:39151 {}] regionserver.HRegionServer(1031): Exiting; stopping=3302f0f507bd,39151,1731026116929; zookeeper connection closed. 
2024-11-08T00:36:00,819 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3c15ba12 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3c15ba12 2024-11-08T00:36:00,834 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-08T00:36:00,839 DEBUG [RS:0;3302f0f507bd:42513 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs 2024-11-08T00:36:00,839 INFO [RS:0;3302f0f507bd:42513 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C42513%2C1731026115632.meta:.meta(num 1731026155634) 2024-11-08T00:36:00,840 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:00,840 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:00,840 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:00,840 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:00,840 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:00,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741890_1074 (size=14682) 2024-11-08T00:36:00,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741890_1074 (size=14682) 2024-11-08T00:36:00,846 DEBUG [RS:0;3302f0f507bd:42513 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs 2024-11-08T00:36:00,846 INFO [RS:0;3302f0f507bd:42513 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C42513%2C1731026115632:(num 1731026155326) 2024-11-08T00:36:00,846 DEBUG [RS:0;3302f0f507bd:42513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:36:00,847 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:36:00,847 INFO [RS:0;3302f0f507bd:42513 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:36:00,847 INFO [RS:0;3302f0f507bd:42513 {}] hbase.ChoreService(370): Chore service for: regionserver/3302f0f507bd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T00:36:00,847 INFO [RS:0;3302f0f507bd:42513 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:36:00,847 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T00:36:00,847 INFO [RS:0;3302f0f507bd:42513 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42513 2024-11-08T00:36:00,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:36:00,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3302f0f507bd,42513,1731026115632 2024-11-08T00:36:00,856 INFO [RS:0;3302f0f507bd:42513 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:36:00,856 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3302f0f507bd,42513,1731026115632] 2024-11-08T00:36:00,876 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3302f0f507bd,42513,1731026115632 already deleted, retry=false 2024-11-08T00:36:00,877 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3302f0f507bd,42513,1731026115632 expired; onlineServers=0 2024-11-08T00:36:00,877 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3302f0f507bd,33483,1731026115449' ***** 2024-11-08T00:36:00,877 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T00:36:00,877 INFO [M:0;3302f0f507bd:33483 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:36:00,877 INFO [M:0;3302f0f507bd:33483 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:36:00,877 DEBUG [M:0;3302f0f507bd:33483 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T00:36:00,877 DEBUG [M:0;3302f0f507bd:33483 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T00:36:00,877 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-08T00:36:00,877 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026116018 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026116018,5,FailOnTimeoutGroup] 2024-11-08T00:36:00,877 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026116018 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026116018,5,FailOnTimeoutGroup] 2024-11-08T00:36:00,877 INFO [M:0;3302f0f507bd:33483 {}] hbase.ChoreService(370): Chore service for: master/3302f0f507bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T00:36:00,878 INFO [M:0;3302f0f507bd:33483 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:36:00,878 DEBUG [M:0;3302f0f507bd:33483 {}] master.HMaster(1795): Stopping service threads 2024-11-08T00:36:00,878 INFO [M:0;3302f0f507bd:33483 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T00:36:00,878 INFO [M:0;3302f0f507bd:33483 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:36:00,878 INFO [M:0;3302f0f507bd:33483 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T00:36:00,878 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T00:36:00,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T00:36:00,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:00,887 DEBUG [M:0;3302f0f507bd:33483 {}] zookeeper.ZKUtil(347): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T00:36:00,887 WARN [M:0;3302f0f507bd:33483 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T00:36:00,888 INFO [M:0;3302f0f507bd:33483 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/.lastflushedseqids 2024-11-08T00:36:00,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741901_1087 (size=130) 2024-11-08T00:36:00,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741901_1087 (size=130) 2024-11-08T00:36:00,895 INFO [M:0;3302f0f507bd:33483 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T00:36:00,895 INFO [M:0;3302f0f507bd:33483 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T00:36:00,895 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:36:00,895 INFO [M:0;3302f0f507bd:33483 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:00,895 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:00,895 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:36:00,895 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:00,896 INFO [M:0;3302f0f507bd:33483 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-08T00:36:00,913 DEBUG [M:0;3302f0f507bd:33483 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/80151cd6dfe84bd7a4de6effd8d15bfb is 82, key is hbase:meta,,1/info:regioninfo/1731026116722/Put/seqid=0 2024-11-08T00:36:00,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741902_1088 (size=5672) 2024-11-08T00:36:00,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741902_1088 (size=5672) 2024-11-08T00:36:00,921 INFO [M:0;3302f0f507bd:33483 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/80151cd6dfe84bd7a4de6effd8d15bfb 2024-11-08T00:36:00,944 DEBUG [M:0;3302f0f507bd:33483 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/169dbae7810548a49801de773eb96b69 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731026117530/Put/seqid=0 2024-11-08T00:36:00,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741903_1089 (size=6256) 2024-11-08T00:36:00,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741903_1089 (size=6256) 2024-11-08T00:36:00,950 INFO [M:0;3302f0f507bd:33483 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/169dbae7810548a49801de773eb96b69 2024-11-08T00:36:00,955 INFO [M:0;3302f0f507bd:33483 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 169dbae7810548a49801de773eb96b69 2024-11-08T00:36:00,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:36:00,966 INFO [RS:0;3302f0f507bd:42513 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:36:00,966 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:42513-0x10117de57140001, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:36:00,966 INFO [RS:0;3302f0f507bd:42513 {}] regionserver.HRegionServer(1031): Exiting; stopping=3302f0f507bd,42513,1731026115632; zookeeper connection closed. 2024-11-08T00:36:00,966 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6b6c3025 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6b6c3025 2024-11-08T00:36:00,967 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-08T00:36:00,974 DEBUG [M:0;3302f0f507bd:33483 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6af1133bb0a64d929da536e1eb343f89 is 69, key is 3302f0f507bd,39151,1731026116929/rs:state/1731026117007/Put/seqid=0 2024-11-08T00:36:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741904_1090 (size=5224) 2024-11-08T00:36:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741904_1090 (size=5224) 2024-11-08T00:36:00,979 INFO [M:0;3302f0f507bd:33483 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6af1133bb0a64d929da536e1eb343f89 2024-11-08T00:36:01,001 DEBUG [M:0;3302f0f507bd:33483 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e32059a4293a496084785ffb10b45ed5 is 52, key is load_balancer_on/state:d/1731026116909/Put/seqid=0 2024-11-08T00:36:01,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741905_1091 (size=5056) 2024-11-08T00:36:01,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741905_1091 (size=5056) 2024-11-08T00:36:01,007 INFO [M:0;3302f0f507bd:33483 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e32059a4293a496084785ffb10b45ed5 2024-11-08T00:36:01,012 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/80151cd6dfe84bd7a4de6effd8d15bfb as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/80151cd6dfe84bd7a4de6effd8d15bfb 2024-11-08T00:36:01,017 INFO [M:0;3302f0f507bd:33483 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/80151cd6dfe84bd7a4de6effd8d15bfb, entries=8, sequenceid=60, filesize=5.5 K 2024-11-08T00:36:01,018 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/169dbae7810548a49801de773eb96b69 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/169dbae7810548a49801de773eb96b69 2024-11-08T00:36:01,024 INFO [M:0;3302f0f507bd:33483 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 169dbae7810548a49801de773eb96b69 2024-11-08T00:36:01,024 INFO [M:0;3302f0f507bd:33483 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/169dbae7810548a49801de773eb96b69, entries=6, sequenceid=60, filesize=6.1 K 2024-11-08T00:36:01,025 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6af1133bb0a64d929da536e1eb343f89 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6af1133bb0a64d929da536e1eb343f89 2024-11-08T00:36:01,031 INFO [M:0;3302f0f507bd:33483 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6af1133bb0a64d929da536e1eb343f89, entries=2, sequenceid=60, filesize=5.1 K 2024-11-08T00:36:01,032 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e32059a4293a496084785ffb10b45ed5 as hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e32059a4293a496084785ffb10b45ed5 2024-11-08T00:36:01,038 INFO [M:0;3302f0f507bd:33483 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e32059a4293a496084785ffb10b45ed5, entries=1, sequenceid=60, filesize=4.9 K 2024-11-08T00:36:01,040 INFO [M:0;3302f0f507bd:33483 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=60, compaction requested=false 2024-11-08T00:36:01,041 INFO [M:0;3302f0f507bd:33483 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T00:36:01,041 DEBUG [M:0;3302f0f507bd:33483 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026160895Disabling compacts and flushes for region at 1731026160895Disabling writes for close at 1731026160895Obtaining lock to block concurrent updates at 1731026160896 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731026160896Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731026160896Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731026160897 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731026160897Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731026160913 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731026160913Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731026160928 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731026160943 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731026160943Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731026160956 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731026160973 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731026160973Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731026160984 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731026161001 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731026161001Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dc1ea42: reopening flushed file at 1731026161011 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e96be61: reopening flushed file at 1731026161018 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7860f9f5: reopening flushed file at 1731026161024 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48a9fa72: reopening flushed file at 1731026161031 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=60, compaction requested=false at 1731026161040 (+9 ms)Writing region close event to WAL at 1731026161041 (+1 ms)Closed at 1731026161041 2024-11-08T00:36:01,042 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:01,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:01,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:01,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:01,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:01,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34377 is added to blk_1073741888_1071 (size=1045) 2024-11-08T00:36:01,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40309 is added to blk_1073741888_1071 (size=1045) 2024-11-08T00:36:01,234 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:36:01,256 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:01,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:01,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:01,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:01,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:01,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:01,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:01,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:01,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:01,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:01,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-08T00:36:01,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:36:01,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-08T00:36:01,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-08T00:36:02,299 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@17f32c6d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-648086526-172.17.0.3-1731026113172:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:33655,null,null]) java.net.ConnectException: Call From 3302f0f507bd/172.17.0.3 to localhost:35163 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-08T00:36:02,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:02,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:03,042 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/WALs/3302f0f507bd,33483,1731026115449/3302f0f507bd%2C33483%2C1731026115449.1731026115799 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/oldWALs/3302f0f507bd%2C33483%2C1731026115449.1731026115799 2024-11-08T00:36:03,045 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/MasterData/oldWALs/3302f0f507bd%2C33483%2C1731026115449.1731026115799 to hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/oldWALs/3302f0f507bd%2C33483%2C1731026115449.1731026115799$masterlocalwal$ 2024-11-08T00:36:03,045 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T00:36:03,045 INFO [M:0;3302f0f507bd:33483 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-08T00:36:03,046 INFO [M:0;3302f0f507bd:33483 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33483 2024-11-08T00:36:03,046 INFO [M:0;3302f0f507bd:33483 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:36:03,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:36:03,173 INFO [M:0;3302f0f507bd:33483 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:36:03,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33483-0x10117de57140000, quorum=127.0.0.1:55789, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:36:03,176 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1400d7c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:03,177 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@122a196d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:36:03,177 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:36:03,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b1c2893{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:36:03,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@557202e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,STOPPED} 2024-11-08T00:36:03,179 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:36:03,179 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-648086526-172.17.0.3-1731026113172 (Datanode Uuid 1be0d67d-0789-4e98-b5fc-be9cc512b3fb) service to localhost/127.0.0.1:42193 2024-11-08T00:36:03,179 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-08T00:36:03,179 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:36:03,179 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6f07e091 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:33655,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35163 , LocalHost:localPort 3302f0f507bd/172.17.0.3:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-08T00:36:03,180 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6f07e091 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:40309,null,null]) java.io.IOException: No block pool offer service for bpid=BP-648086526-172.17.0.3-1731026113172 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:03,180 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data3/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:03,180 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6f07e091 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33655,null,null], DatanodeInfoWithStorage[127.0.0.1:40309,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-648086526-172.17.0.3-1731026113172:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33655,null,null], DatanodeInfoWithStorage[127.0.0.1:40309,null,null]] 2024-11-08T00:36:03,180 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6f07e091 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40309,null,null]) java.io.IOException: No block pool offer service for bpid=BP-648086526-172.17.0.3-1731026113172 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:03,181 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data4/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:03,181 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6f07e091 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33655,null,null]) java.io.IOException: No block pool offer service for bpid=BP-648086526-172.17.0.3-1731026113172 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:03,181 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:36:03,181 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6f07e091 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40309,null,null], DatanodeInfoWithStorage[127.0.0.1:33655,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-648086526-172.17.0.3-1731026113172:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:40309,null,null], DatanodeInfoWithStorage[127.0.0.1:33655,null,null]] 2024-11-08T00:36:03,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@325d7ee2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:03,184 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15751333{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:36:03,184 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:36:03,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c141b19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:36:03,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@7ab1ed71{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,STOPPED} 2024-11-08T00:36:03,194 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:36:03,194 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-08T00:36:03,194 WARN [BP-648086526-172.17.0.3-1731026113172 heartbeating to localhost/127.0.0.1:42193 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-648086526-172.17.0.3-1731026113172 (Datanode Uuid 9f14824b-723a-48c9-a12a-a80f2a62b14a) service to localhost/127.0.0.1:42193 2024-11-08T00:36:03,194 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:36:03,195 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data5/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:03,195 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/cluster_94d4fa05-7ba4-b350-d218-42d4a2c67eba/data/data6/current/BP-648086526-172.17.0.3-1731026113172 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:03,195 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:36:03,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7982676d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:36:03,201 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2efbdc75{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:36:03,201 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:36:03,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d790455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:36:03,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3150e6db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir/,STOPPED} 2024-11-08T00:36:03,209 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T00:36:03,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T00:36:03,254 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 82) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:42193 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42193 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42193 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33309 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42193 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$901/0x00007f2c70bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42193 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
IPC Parameter Sending Thread for localhost/127.0.0.1:42193 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42193 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42193 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42193 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$901/0x00007f2c70bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42193 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:33309 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:42193 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 120) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6320 (was 6927) 2024-11-08T00:36:03,262 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=6320 2024-11-08T00:36:03,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T00:36:03,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.log.dir so I do NOT create it in target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6 2024-11-08T00:36:03,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/33fe3ad3-2016-67dd-bf13-d3c46fb5d9ce/hadoop.tmp.dir so I do NOT create it in target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6 2024-11-08T00:36:03,263 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958, deleteOnExit=true 2024-11-08T00:36:03,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T00:36:03,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/test.cache.data in system properties and HBase conf 2024-11-08T00:36:03,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T00:36:03,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir in 
system properties and HBase conf 2024-11-08T00:36:03,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T00:36:03,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T00:36:03,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T00:36:03,263 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/nfs.dump.dir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/java.io.tmpdir in system properties and HBase conf 2024-11-08T00:36:03,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:36:03,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T00:36:03,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T00:36:03,278 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:36:03,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:03,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:03,743 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:03,748 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:03,750 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:03,750 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:03,750 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:36:03,751 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:03,752 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c07fc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:03,752 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61f9169f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:03,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4360f0f4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/java.io.tmpdir/jetty-localhost-41549-hadoop-hdfs-3_4_1-tests_jar-_-any-4093161106485150860/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:36:03,859 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bc294e4{HTTP/1.1, (http/1.1)}{localhost:41549} 2024-11-08T00:36:03,859 INFO [Time-limited test {}] server.Server(415): Started @157956ms 2024-11-08T00:36:03,872 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:36:04,135 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:04,139 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:04,140 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:04,140 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:04,140 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:36:04,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60043bb8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:04,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@123783d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:04,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3192c1d0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/java.io.tmpdir/jetty-localhost-33839-hadoop-hdfs-3_4_1-tests_jar-_-any-16705844997586252957/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:04,260 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@89ebf29{HTTP/1.1, (http/1.1)}{localhost:33839} 2024-11-08T00:36:04,260 INFO [Time-limited test {}] server.Server(415): Started @158358ms 2024-11-08T00:36:04,262 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:36:04,290 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:04,294 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:04,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:04,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:04,295 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:36:04,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e7f214b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:04,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54dbaae8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:04,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@bd6b006{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/java.io.tmpdir/jetty-localhost-36969-hadoop-hdfs-3_4_1-tests_jar-_-any-3745862563645045912/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:04,403 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@277eda4d{HTTP/1.1, (http/1.1)}{localhost:36969} 2024-11-08T00:36:04,403 INFO [Time-limited test {}] server.Server(415): Started @158500ms 2024-11-08T00:36:04,404 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:36:04,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:04,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:05,262 WARN [Thread-1194 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data1/current/BP-1358657480-172.17.0.3-1731026163290/current, will proceed with Du for space computation calculation, 2024-11-08T00:36:05,263 WARN [Thread-1195 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data2/current/BP-1358657480-172.17.0.3-1731026163290/current, will proceed with Du for space computation calculation, 2024-11-08T00:36:05,281 WARN [Thread-1158 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T00:36:05,283 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb5a77596466fd0db with lease ID 0xa60536ce372f035: Processing first storage report for DS-45f9592c-a91b-4f06-84fe-85833bab1282 from datanode DatanodeRegistration(127.0.0.1:43193, datanodeUuid=55ce6167-532a-40a5-9fd7-373da715fc23, infoPort=41001, infoSecurePort=0, ipcPort=39475, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290) 2024-11-08T00:36:05,283 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5a77596466fd0db with lease ID 0xa60536ce372f035: from storage DS-45f9592c-a91b-4f06-84fe-85833bab1282 node DatanodeRegistration(127.0.0.1:43193, datanodeUuid=55ce6167-532a-40a5-9fd7-373da715fc23, infoPort=41001, infoSecurePort=0, ipcPort=39475, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:05,283 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb5a77596466fd0db with lease ID 0xa60536ce372f035: Processing first storage report for DS-0d8848cf-4113-44a6-a763-ac3543e62810 from datanode DatanodeRegistration(127.0.0.1:43193, datanodeUuid=55ce6167-532a-40a5-9fd7-373da715fc23, infoPort=41001, infoSecurePort=0, ipcPort=39475, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290) 2024-11-08T00:36:05,283 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5a77596466fd0db with lease ID 0xa60536ce372f035: from storage DS-0d8848cf-4113-44a6-a763-ac3543e62810 node DatanodeRegistration(127.0.0.1:43193, datanodeUuid=55ce6167-532a-40a5-9fd7-373da715fc23, infoPort=41001, infoSecurePort=0, ipcPort=39475, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:05,411 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data3/current/BP-1358657480-172.17.0.3-1731026163290/current, will proceed with Du for space computation calculation, 2024-11-08T00:36:05,411 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data4/current/BP-1358657480-172.17.0.3-1731026163290/current, will proceed with Du for space computation calculation, 2024-11-08T00:36:05,432 WARN [Thread-1181 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T00:36:05,434 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa95c67e3fcff4e72 with lease ID 0xa60536ce372f036: Processing first storage report for DS-d33fbd53-d237-460d-9d3e-81af0606845b from datanode DatanodeRegistration(127.0.0.1:42655, datanodeUuid=44ed9313-edd8-4d80-8a5d-c2753b809252, infoPort=37295, infoSecurePort=0, ipcPort=44327, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290) 2024-11-08T00:36:05,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa95c67e3fcff4e72 with lease ID 0xa60536ce372f036: from storage DS-d33fbd53-d237-460d-9d3e-81af0606845b node DatanodeRegistration(127.0.0.1:42655, datanodeUuid=44ed9313-edd8-4d80-8a5d-c2753b809252, infoPort=37295, infoSecurePort=0, ipcPort=44327, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:05,434 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa95c67e3fcff4e72 with lease ID 0xa60536ce372f036: Processing first storage report for DS-3af8b3ce-cfd2-47b9-9084-f54afdc69111 from datanode DatanodeRegistration(127.0.0.1:42655, datanodeUuid=44ed9313-edd8-4d80-8a5d-c2753b809252, infoPort=37295, infoSecurePort=0, ipcPort=44327, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290) 2024-11-08T00:36:05,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa95c67e3fcff4e72 with lease ID 0xa60536ce372f036: from storage DS-3af8b3ce-cfd2-47b9-9084-f54afdc69111 node DatanodeRegistration(127.0.0.1:42655, datanodeUuid=44ed9313-edd8-4d80-8a5d-c2753b809252, infoPort=37295, infoSecurePort=0, ipcPort=44327, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:05,436 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6 2024-11-08T00:36:05,439 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/zookeeper_0, clientPort=60383, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T00:36:05,440 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60383 2024-11-08T00:36:05,440 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:05,441 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:05,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:36:05,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:36:05,452 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458 with version=8 2024-11-08T00:36:05,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase-staging 2024-11-08T00:36:05,455 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:36:05,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:05,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:05,455 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:36:05,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:05,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:36:05,455 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T00:36:05,456 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:36:05,456 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39533 2024-11-08T00:36:05,458 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39533 connecting to ZooKeeper ensemble=127.0.0.1:60383 2024-11-08T00:36:05,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395330x0, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-08T00:36:05,505 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39533-0x10117df1a690000 connected 2024-11-08T00:36:05,592 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:05,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:05,597 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:36:05,597 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458, hbase.cluster.distributed=false 2024-11-08T00:36:05,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:36:05,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39533 2024-11-08T00:36:05,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39533 2024-11-08T00:36:05,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39533 2024-11-08T00:36:05,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39533 2024-11-08T00:36:05,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39533 2024-11-08T00:36:05,617 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:36:05,617 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:05,617 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:05,617 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:36:05,617 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:05,617 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:36:05,617 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:36:05,617 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:36:05,618 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45577 2024-11-08T00:36:05,619 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45577 connecting to ZooKeeper ensemble=127.0.0.1:60383 2024-11-08T00:36:05,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:05,621 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:05,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455770x0, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:36:05,635 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455770x0, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:36:05,635 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45577-0x10117df1a690001 connected 2024-11-08T00:36:05,635 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:36:05,636 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:36:05,636 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:36:05,638 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:36:05,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45577 2024-11-08T00:36:05,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45577 2024-11-08T00:36:05,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45577 2024-11-08T00:36:05,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45577 2024-11-08T00:36:05,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45577 2024-11-08T00:36:05,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:05,653 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3302f0f507bd:39533 2024-11-08T00:36:05,653 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3302f0f507bd,39533,1731026165454 2024-11-08T00:36:05,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:05,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:36:05,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:36:05,667 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3302f0f507bd,39533,1731026165454 2024-11-08T00:36:05,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:36:05,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:05,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:05,677 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:36:05,677 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3302f0f507bd,39533,1731026165454 from backup master directory 2024-11-08T00:36:05,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:36:05,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3302f0f507bd,39533,1731026165454 2024-11-08T00:36:05,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:36:05,687 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:36:05,687 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3302f0f507bd,39533,1731026165454 2024-11-08T00:36:05,691 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/hbase.id] with ID: 3a4ce26e-62ba-45f6-b72e-ace8867ad309 2024-11-08T00:36:05,692 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/.tmp/hbase.id 2024-11-08T00:36:05,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:36:05,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:36:05,698 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/.tmp/hbase.id]:[hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/hbase.id] 2024-11-08T00:36:05,712 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:05,712 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T00:36:05,714 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
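The three util.FSUtils records above (create the cluster ID file, write it to a temporary location, move it to its target location) describe the usual HDFS pattern for publishing a small metadata file: write the content to a side path first, then rename it into place so a reader never observes a half-written file. The sketch below is not HBase's FSUtils code, only a minimal illustration of the same write-then-rename idea against the Hadoop FileSystem API; the paths under /hbase-example and the UUID payload are assumptions for the example.

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdBootstrapSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();           // picks up fs.defaultFS from the classpath config
        FileSystem fs = FileSystem.get(conf);

        Path target = new Path("/hbase-example/hbase.id");  // hypothetical root dir for this sketch
        Path tmp = new Path("/hbase-example/.tmp/hbase.id");

        // 1. Write the new cluster ID to a temporary location first.
        String clusterId = UUID.randomUUID().toString();
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }

        // 2. Rename it to the final location; readers never see a partially written file.
        fs.mkdirs(target.getParent());
        if (!fs.rename(tmp, target)) {
          throw new IllegalStateException("could not publish cluster ID file at " + target);
        }
        System.out.println("published cluster ID " + clusterId + " at " + target);
      }
    }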
2024-11-08T00:36:05,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:05,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:05,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:36:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:36:06,134 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:36:06,135 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T00:36:06,135 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:36:06,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:36:06,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:36:06,149 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store 2024-11-08T00:36:06,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:36:06,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:36:06,156 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:06,156 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:36:06,156 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:06,156 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:06,157 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:36:06,157 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:06,157 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
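The 'master:store' descriptor logged above (families info, proc, rs and state, each with its own VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY and BLOCKSIZE settings) is the kind of schema expressed through the HBase client descriptor builders. The snippet below is a hedged sketch of an equivalent descriptor for a hypothetical table named example:store, not the internal code that creates the master local region, and it mirrors only the info and proc families from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreSchemaSketch {
      public static TableDescriptor exampleDescriptor() {
        // Mirrors the 'info' family above: 3 versions, ROWCOL bloom filter,
        // ROW_INDEX_V1 block encoding, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // Mirrors the 'proc' family: single version, ROW bloom filter, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        // "example:store" is a hypothetical namespace:table name for this sketch.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }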
2024-11-08T00:36:06,157 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026166156Disabling compacts and flushes for region at 1731026166156Disabling writes for close at 1731026166157 (+1 ms)Writing region close event to WAL at 1731026166157Closed at 1731026166157 2024-11-08T00:36:06,158 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/.initializing 2024-11-08T00:36:06,158 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454 2024-11-08T00:36:06,160 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C39533%2C1731026165454, suffix=, logDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454, archiveDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/oldWALs, maxLogs=10 2024-11-08T00:36:06,161 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C39533%2C1731026165454.1731026166160 2024-11-08T00:36:06,167 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 2024-11-08T00:36:06,168 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37295:37295),(127.0.0.1/127.0.0.1:41001:41001)] 2024-11-08T00:36:06,182 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:36:06,182 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:06,183 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,183 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T00:36:06,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:06,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T00:36:06,192 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:36:06,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T00:36:06,194 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:36:06,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T00:36:06,196 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:36:06,197 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,198 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,198 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,200 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,200 DEBUG [master/3302f0f507bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,200 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:36:06,201 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:06,203 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:36:06,204 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708550, jitterRate=-0.09903329610824585}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:36:06,204 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731026166183Initializing all the Stores at 1731026166184 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026166184Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026166188 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026166188Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026166188Cleaning up temporary data from old regions at 1731026166200 (+12 ms)Region opened successfully at 1731026166204 (+4 ms) 2024-11-08T00:36:06,205 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T00:36:06,208 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a95db8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:36:06,208 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T00:36:06,209 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T00:36:06,209 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T00:36:06,209 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T00:36:06,209 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-08T00:36:06,210 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T00:36:06,210 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T00:36:06,213 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T00:36:06,214 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T00:36:06,266 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T00:36:06,266 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T00:36:06,267 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T00:36:06,276 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T00:36:06,277 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T00:36:06,278 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T00:36:06,287 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T00:36:06,288 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T00:36:06,297 DEBUG 
[master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T00:36:06,300 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T00:36:06,308 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T00:36:06,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:36:06,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:36:06,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:06,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:06,319 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3302f0f507bd,39533,1731026165454, sessionid=0x10117df1a690000, setting cluster-up flag (Was=false) 2024-11-08T00:36:06,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:06,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:06,371 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T00:36:06,372 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,39533,1731026165454 2024-11-08T00:36:06,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:06,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:06,518 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T00:36:06,520 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,39533,1731026165454 2024-11-08T00:36:06,521 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T00:36:06,523 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T00:36:06,523 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T00:36:06,524 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-08T00:36:06,524 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3302f0f507bd,39533,1731026165454 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T00:36:06,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:36:06,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:36:06,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:36:06,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:36:06,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3302f0f507bd:0, corePoolSize=10, maxPoolSize=10 2024-11-08T00:36:06,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:36:06,526 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-08T00:36:06,529 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731026196529 2024-11-08T00:36:06,529 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:36:06,529 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T00:36:06,529 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T00:36:06,529 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T00:36:06,529 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T00:36:06,529 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T00:36:06,529 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T00:36:06,529 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T00:36:06,530 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
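The cleaner setup above (DirScanPool, TimeToLiveLogCleaner and related plugins, then a ScheduledChore named LogsCleaner firing every 600000 ms) is a periodic-janitor pattern: a cleaner pass runs against an archive directory on a fixed schedule and removes entries past their TTL. The sketch below reproduces only that scheduling-plus-TTL idea with a plain ScheduledExecutorService and java.nio; it is not HBase's ChoreService or LogCleaner, and the directory and TTL values are assumptions.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.time.Duration;
    import java.time.Instant;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class OldWalCleanerSketch {
      public static void main(String[] args) {
        Path archiveDir = Paths.get("/tmp/oldWALs-example");  // hypothetical archive directory
        Duration ttl = Duration.ofMinutes(10);                 // delete files older than this

        ScheduledExecutorService chore = Executors.newSingleThreadScheduledExecutor();
        // period=600000 ms in the log corresponds to a 10-minute schedule like this one.
        chore.scheduleAtFixedRate(() -> cleanOnce(archiveDir, ttl), 10, 10, TimeUnit.MINUTES);
      }

      static void cleanOnce(Path archiveDir, Duration ttl) {
        Instant cutoff = Instant.now().minus(ttl);
        try (var files = Files.list(archiveDir)) {
          files.filter(Files::isRegularFile)
               .filter(p -> isOlderThan(p, cutoff))
               .forEach(OldWalCleanerSketch::deleteQuietly);
        } catch (IOException e) {
          // A real cleaner would log and simply retry on the next period.
          System.err.println("cleaner pass failed: " + e);
        }
      }

      static boolean isOlderThan(Path p, Instant cutoff) {
        try {
          return Files.getLastModifiedTime(p).toInstant().isBefore(cutoff);
        } catch (IOException e) {
          return false;
        }
      }

      static void deleteQuietly(Path p) {
        try {
          Files.deleteIfExists(p);
        } catch (IOException e) {
          System.err.println("could not delete " + p + ": " + e);
        }
      }
    }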
2024-11-08T00:36:06,530 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T00:36:06,530 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T00:36:06,530 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T00:36:06,530 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,531 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T00:36:06,531 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T00:36:06,531 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T00:36:06,531 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026166531,5,FailOnTimeoutGroup] 2024-11-08T00:36:06,531 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026166531,5,FailOnTimeoutGroup] 2024-11-08T00:36:06,531 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,531 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T00:36:06,531 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,531 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,545 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(746): ClusterId : 3a4ce26e-62ba-45f6-b72e-ace8867ad309 2024-11-08T00:36:06,545 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:36:06,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:36:06,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:36:06,548 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T00:36:06,548 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458 2024-11-08T00:36:06,557 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:36:06,557 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:36:06,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42655 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:36:06,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:36:06,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:06,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:36:06,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:36:06,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:06,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:36:06,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:36:06,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:06,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:36:06,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:36:06,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:06,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:36:06,567 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:36:06,568 DEBUG [RS:0;3302f0f507bd:45577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4814e226, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:36:06,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:36:06,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:06,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:06,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:36:06,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740 2024-11-08T00:36:06,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740 2024-11-08T00:36:06,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:36:06,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:36:06,571 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-08T00:36:06,572 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:36:06,578 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:36:06,579 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846924, jitterRate=0.07692064344882965}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:36:06,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731026166559Initializing all the Stores at 1731026166560 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026166560Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026166560Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026166560Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 
1731026166560Cleaning up temporary data from old regions at 1731026166571 (+11 ms)Region opened successfully at 1731026166579 (+8 ms) 2024-11-08T00:36:06,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:36:06,579 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:36:06,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:36:06,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:36:06,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:36:06,580 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:36:06,580 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026166579Disabling compacts and flushes for region at 1731026166579Disabling writes for close at 1731026166579Writing region close event to WAL at 1731026166580 (+1 ms)Closed at 1731026166580 2024-11-08T00:36:06,581 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:36:06,581 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T00:36:06,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T00:36:06,583 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:36:06,584 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T00:36:06,590 DEBUG [RS:0;3302f0f507bd:45577 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3302f0f507bd:45577 2024-11-08T00:36:06,590 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:36:06,590 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:36:06,590 DEBUG [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(832): About to register with Master. 
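The region open journal above prints the full schema of the hbase:meta column families (info, ns, rep_barrier, table): ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching, and an 8 KB block size for most of them. As an illustration only (the meta descriptor itself is built internally by HBase), the same attributes can be expressed with the public ColumnFamilyDescriptorBuilder API; the class name below is a placeholder for this sketch.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaInfoFamilySketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family printed in the open journal: VERSIONS => '3',
    // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL',
    // IN_MEMORY => 'true', BLOCKSIZE => 8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    System.out.println(info);
  }
}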
2024-11-08T00:36:06,590 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,39533,1731026165454 with port=45577, startcode=1731026165616 2024-11-08T00:36:06,591 DEBUG [RS:0;3302f0f507bd:45577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:36:06,593 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39627, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:36:06,593 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39533 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3302f0f507bd,45577,1731026165616 2024-11-08T00:36:06,593 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39533 {}] master.ServerManager(517): Registering regionserver=3302f0f507bd,45577,1731026165616 2024-11-08T00:36:06,595 DEBUG [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458 2024-11-08T00:36:06,595 DEBUG [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34225 2024-11-08T00:36:06,595 DEBUG [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:36:06,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:36:06,603 DEBUG [RS:0;3302f0f507bd:45577 {}] zookeeper.ZKUtil(111): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3302f0f507bd,45577,1731026165616 2024-11-08T00:36:06,603 WARN [RS:0;3302f0f507bd:45577 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:36:06,603 INFO [RS:0;3302f0f507bd:45577 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:36:06,603 DEBUG [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616 2024-11-08T00:36:06,603 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3302f0f507bd,45577,1731026165616] 2024-11-08T00:36:06,607 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:36:06,609 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:36:06,610 INFO [RS:0;3302f0f507bd:45577 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:36:06,610 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
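The MemStoreFlusher (globalMemStoreLimit=880 M) and PressureAwareCompactionThroughputController (100 MB/s upper, 50 MB/s lower) entries above reflect regionserver tuning knobs. Below is a minimal sketch of setting such limits before starting a cluster; the property names used here ("hbase.regionserver.global.memstore.size" and the two compaction throughput bound keys) are assumed from memory and do not appear verbatim in the log, so verify them against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ThroughputTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the heap reserved for all memstores; with a heap around 2.2 GB
    // this would land near the 880 M global limit reported above.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Compaction throughput bounds matching the 100 MB/s and 50 MB/s values in the log.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    // conf would then be handed to the minicluster or to hbase-site.xml equivalently.
  }
}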
2024-11-08T00:36:06,610 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:36:06,611 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:36:06,611 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,611 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,611 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,611 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,611 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,611 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:36:06,612 DEBUG [RS:0;3302f0f507bd:45577 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:36:06,613 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
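The ScheduledChore/ChoreService lines above show how periodic regionserver work (compaction checks, flush checks, executor status) is scheduled. The sketch below uses the same internal API to run a toy 1000 ms chore, comparable to the CompactionChecker period; the chore name, the anonymous Stoppable, and the three-argument ScheduledChore constructor are assumptions based on the 2.x/3.x codebase, not taken from the log.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled, standing in for what the
    // regionserver passes for its own chores.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("chore-sketch");
    // A 1000 ms chore, like the CompactionChecker entry above.
    service.scheduleChore(new ScheduledChore("demoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore fired");
      }
    });
    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}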
2024-11-08T00:36:06,613 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,613 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,613 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,613 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,613 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,45577,1731026165616-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:36:06,630 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:36:06,630 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,45577,1731026165616-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,630 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,630 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.Replication(171): 3302f0f507bd,45577,1731026165616 started 2024-11-08T00:36:06,645 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:06,645 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1482): Serving as 3302f0f507bd,45577,1731026165616, RpcServer on 3302f0f507bd/172.17.0.3:45577, sessionid=0x10117df1a690001 2024-11-08T00:36:06,645 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:36:06,645 DEBUG [RS:0;3302f0f507bd:45577 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3302f0f507bd,45577,1731026165616 2024-11-08T00:36:06,645 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,45577,1731026165616' 2024-11-08T00:36:06,645 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:36:06,646 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:36:06,646 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:36:06,646 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:36:06,646 DEBUG [RS:0;3302f0f507bd:45577 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3302f0f507bd,45577,1731026165616 2024-11-08T00:36:06,646 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,45577,1731026165616' 2024-11-08T00:36:06,646 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:36:06,647 DEBUG 
[RS:0;3302f0f507bd:45577 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:36:06,647 DEBUG [RS:0;3302f0f507bd:45577 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:36:06,647 INFO [RS:0;3302f0f507bd:45577 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T00:36:06,647 INFO [RS:0;3302f0f507bd:45577 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T00:36:06,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:06,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:06,734 WARN [3302f0f507bd:39533 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
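The two WARN stack traces above come from background WAL-writer close threads still pointing at the earlier minicluster (hdfs://localhost:42193); their DFSClient has already been shut down, hence the "Filesystem closed" cause, so the failures are expected noise here. For reference, the reflection target is HDFS lease recovery; a bare sketch of the same calls against a DistributedFileSystem follows, with a hypothetical path in place of the WALs named above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WAL path; the real ones are the hdfs://localhost:42193/... WALs above.
    Path wal = new Path("/hbase/WALs/example-wal");
    FileSystem fs = wal.getFileSystem(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // recoverLease() asks the NameNode to reclaim the write lease; isFileClosed()
      // checks whether the last block is finalized. Both pass through DFSClient.checkOpen(),
      // which is what raises IOException("Filesystem closed") in the traces above.
      boolean recovered = dfs.recoverLease(wal);
      while (!recovered && !dfs.isFileClosed(wal)) {
        Thread.sleep(100);
        recovered = dfs.recoverLease(wal);
      }
    }
  }
}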
2024-11-08T00:36:06,749 INFO [RS:0;3302f0f507bd:45577 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C45577%2C1731026165616, suffix=, logDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616, archiveDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/oldWALs, maxLogs=32 2024-11-08T00:36:06,751 INFO [RS:0;3302f0f507bd:45577 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C45577%2C1731026165616.1731026166750 2024-11-08T00:36:06,760 INFO [RS:0;3302f0f507bd:45577 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 2024-11-08T00:36:06,761 DEBUG [RS:0;3302f0f507bd:45577 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41001:41001),(127.0.0.1/127.0.0.1:37295:37295)] 2024-11-08T00:36:06,984 DEBUG [3302f0f507bd:39533 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-08T00:36:06,985 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3302f0f507bd,45577,1731026165616 2024-11-08T00:36:06,987 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,45577,1731026165616, state=OPENING 2024-11-08T00:36:07,055 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T00:36:07,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:07,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:07,066 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:36:07,066 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:36:07,066 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:36:07,066 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,45577,1731026165616}] 2024-11-08T00:36:07,220 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:36:07,222 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49049, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:36:07,226 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T00:36:07,226 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:36:07,229 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C45577%2C1731026165616.meta, suffix=.meta, logDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616, archiveDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/oldWALs, maxLogs=32 2024-11-08T00:36:07,229 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta 2024-11-08T00:36:07,237 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta 2024-11-08T00:36:07,238 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41001:41001),(127.0.0.1/127.0.0.1:37295:37295)] 2024-11-08T00:36:07,239 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:36:07,239 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T00:36:07,239 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T00:36:07,239 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
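The WAL configuration entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, FSHLogProvider) are driven by regionserver settings. A sketch of the corresponding configuration follows, assuming the conventional key names ("hbase.wal.provider", "hbase.regionserver.hlog.blocksize", "hbase.regionserver.logroll.multiplier", "hbase.regionserver.maxlogs"); these names are not printed verbatim in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects FSHLogProvider, the provider named in the log.
    conf.set("hbase.wal.provider", "filesystem");
    // WAL block size; the 128 MB roll size above is blocksize * logroll.multiplier.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Cap on un-archived WAL files per regionserver (maxLogs=32 in the log).
    conf.setInt("hbase.regionserver.maxlogs", 32);
  }
}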
2024-11-08T00:36:07,240 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T00:36:07,240 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:07,240 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T00:36:07,240 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T00:36:07,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:36:07,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:36:07,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:07,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:07,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:36:07,243 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:36:07,244 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:07,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:07,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:36:07,245 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:36:07,245 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:07,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:07,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:36:07,246 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:36:07,247 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:07,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
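The StoreOpener lines above enumerate the four hbase:meta families and their store settings as the region is opened on the regionserver. A client-side way to read the same attributes is Admin.getDescriptor on the meta table; this is a standalone sketch, not part of the test itself.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaFamiliesSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Lists the same families the StoreOpener reports above
      // (info, ns, rep_barrier, table) with their encoding and bloom settings.
      for (ColumnFamilyDescriptor cf :
          admin.getDescriptor(TableName.META_TABLE_NAME).getColumnFamilies()) {
        System.out.println(cf.getNameAsString()
            + " encoding=" + cf.getDataBlockEncoding()
            + " bloom=" + cf.getBloomFilterType()
            + " blocksize=" + cf.getBlocksize());
      }
    }
  }
}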
2024-11-08T00:36:07,247 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:36:07,248 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740 2024-11-08T00:36:07,249 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740 2024-11-08T00:36:07,251 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:36:07,251 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:36:07,251 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-08T00:36:07,253 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:36:07,254 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=819533, jitterRate=0.04209122061729431}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:36:07,254 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T00:36:07,255 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731026167240Writing region info on filesystem at 1731026167240Initializing all the Stores at 1731026167241 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026167241Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026167241Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026167241Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026167241Cleaning up temporary data from old regions at 1731026167251 (+10 ms)Running coprocessor post-open hooks at 1731026167254 (+3 ms)Region opened successfully at 1731026167255 (+1 ms) 2024-11-08T00:36:07,256 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731026167219 2024-11-08T00:36:07,258 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T00:36:07,258 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T00:36:07,259 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,45577,1731026165616 2024-11-08T00:36:07,260 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,45577,1731026165616, state=OPEN 2024-11-08T00:36:07,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:36:07,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:36:07,305 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3302f0f507bd,45577,1731026165616 2024-11-08T00:36:07,305 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:36:07,305 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:36:07,309 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T00:36:07,309 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,45577,1731026165616 in 239 msec 2024-11-08T00:36:07,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T00:36:07,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 728 msec 2024-11-08T00:36:07,313 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:36:07,314 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T00:36:07,315 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:36:07,315 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,45577,1731026165616, seqNum=-1] 2024-11-08T00:36:07,315 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:36:07,317 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41627, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:36:07,323 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 800 msec 2024-11-08T00:36:07,323 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731026167323, completionTime=-1 2024-11-08T00:36:07,323 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-08T00:36:07,323 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T00:36:07,325 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-08T00:36:07,325 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731026227325 2024-11-08T00:36:07,325 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731026287325 2024-11-08T00:36:07,325 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-08T00:36:07,326 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39533,1731026165454-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:07,326 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39533,1731026165454-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:07,326 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39533,1731026165454-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:07,326 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3302f0f507bd:39533, period=300000, unit=MILLISECONDS is enabled. 
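The "Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces" step above is internal to InitMetaProcedure during master startup. The equivalent client operation for a user namespace goes through the same Admin API, sketched below; the namespace name testns is hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // 'default' and 'hbase' are created by InitMetaProcedure itself; a user
      // namespace is created with the same call.
      admin.createNamespace(NamespaceDescriptor.create("testns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}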
2024-11-08T00:36:07,326 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:07,327 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:07,329 DEBUG [master/3302f0f507bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T00:36:07,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.643sec 2024-11-08T00:36:07,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T00:36:07,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T00:36:07,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T00:36:07,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-08T00:36:07,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T00:36:07,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39533,1731026165454-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:36:07,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39533,1731026165454-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T00:36:07,334 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T00:36:07,334 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T00:36:07,334 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,39533,1731026165454-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:36:07,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5282eca5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:36:07,346 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3302f0f507bd,39533,-1 for getting cluster id 2024-11-08T00:36:07,346 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T00:36:07,348 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3a4ce26e-62ba-45f6-b72e-ace8867ad309' 2024-11-08T00:36:07,348 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T00:36:07,348 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3a4ce26e-62ba-45f6-b72e-ace8867ad309" 2024-11-08T00:36:07,348 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@756904d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:36:07,348 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3302f0f507bd,39533,-1] 2024-11-08T00:36:07,349 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T00:36:07,349 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:36:07,350 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T00:36:07,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70770496, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:36:07,352 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:36:07,353 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,45577,1731026165616, seqNum=-1] 2024-11-08T00:36:07,353 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:36:07,355 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59156, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:36:07,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3302f0f507bd,39533,1731026165454 2024-11-08T00:36:07,357 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:07,360 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-08T00:36:07,360 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-08T00:36:07,361 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-08T00:36:07,361 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T00:36:07,362 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 3302f0f507bd,39533,1731026165454 2024-11-08T00:36:07,362 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@75a3965 2024-11-08T00:36:07,362 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T00:36:07,364 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39392, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T00:36:07,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-08T00:36:07,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
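The two TableDescriptorChecker warnings above fire because deliberately tiny values are supplied for "hbase.hregion.max.filesize" (786432) and "hbase.hregion.memstore.flush.size" (8192), presumably so that splits and flushes trigger quickly in a short-lived test, and the balancer is switched off first ("set balanceSwitch=false"). A rough sketch of that setup follows, assuming HBaseTestingUtil keeps the familiar startMiniCluster()/shutdownMiniCluster()/getAdmin() methods; this is not the actual TestLogRolling code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.client.Admin;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    // Deliberately tiny limits, matching the values quoted in the warnings above.
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
    util.startMiniCluster();
    Admin admin = util.getAdmin();
    // Matches "set balanceSwitch=false" above; the second argument waits for the switch.
    admin.balancerSwitch(false, true);
    util.shutdownMiniCluster();
  }
}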
2024-11-08T00:36:07,365 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:36:07,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-08T00:36:07,368 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:36:07,368 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:07,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-08T00:36:07,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:36:07,370 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:36:07,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741835_1011 (size=395) 2024-11-08T00:36:07,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741835_1011 (size=395) 2024-11-08T00:36:07,379 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7af40624c00ea502246dccaba597f89c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458 2024-11-08T00:36:07,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42655 is added to blk_1073741836_1012 (size=78) 2024-11-08T00:36:07,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43193 is added to blk_1073741836_1012 (size=78) 2024-11-08T00:36:07,386 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:07,386 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 7af40624c00ea502246dccaba597f89c, disabling compactions & flushes 2024-11-08T00:36:07,386 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:07,386 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:07,386 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. after waiting 0 ms 2024-11-08T00:36:07,386 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:07,386 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:07,386 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7af40624c00ea502246dccaba597f89c: Waiting for close lock at 1731026167386Disabling compacts and flushes for region at 1731026167386Disabling writes for close at 1731026167386Writing region close event to WAL at 1731026167386Closed at 1731026167386 2024-11-08T00:36:07,388 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:36:07,388 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731026167388"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026167388"}]},"ts":"1731026167388"} 2024-11-08T00:36:07,390 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
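The CreateTableProcedure above records the new table and its single 'info' family (VERSIONS => 1, BLOOMFILTER => ROW, BLOCKSIZE => 65536, no block encoding) in hbase:meta. From a client, an equivalent table could be created with the builder API; the sketch below mirrors the descriptor printed in the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Same family attributes as the descriptor in the log: 'info', VERSIONS => 1,
      // BLOOMFILTER => ROW, BLOCKSIZE => 65536, no data block encoding.
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(65536)
              .build())
          .build());
    }
  }
}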
2024-11-08T00:36:07,392 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:36:07,392 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026167392"}]},"ts":"1731026167392"} 2024-11-08T00:36:07,394 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-08T00:36:07,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7af40624c00ea502246dccaba597f89c, ASSIGN}] 2024-11-08T00:36:07,396 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7af40624c00ea502246dccaba597f89c, ASSIGN 2024-11-08T00:36:07,397 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7af40624c00ea502246dccaba597f89c, ASSIGN; state=OFFLINE, location=3302f0f507bd,45577,1731026165616; forceNewPlan=false, retain=false 2024-11-08T00:36:07,470 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:36:07,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,495 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,499 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:07,548 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7af40624c00ea502246dccaba597f89c, regionState=OPENING, regionLocation=3302f0f507bd,45577,1731026165616 2024-11-08T00:36:07,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7af40624c00ea502246dccaba597f89c, ASSIGN because future has completed 2024-11-08T00:36:07,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7af40624c00ea502246dccaba597f89c, server=3302f0f507bd,45577,1731026165616}] 2024-11-08T00:36:07,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:07,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:07,709 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 
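The RecoverLeaseFSUtils warnings above, and their roughly once-per-second repeats through the rest of this section, come from a WAL close that keeps probing a FileSystem whose underlying DFSClient has already been shut down, so every reflective isFileClosed call fails with java.io.IOException: Filesystem closed. Below is a minimal sketch of the HDFS calls involved, assuming a DistributedFileSystem handle; the WAL path is a hypothetical placeholder, the class name is invented for illustration, and this is not the HBase RecoverLeaseFSUtils implementation itself.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WAL path; the real files in this log live under a WALs/<server>/ directory.
    Path wal = new Path("/user/jenkins/test-data/WALs/example-wal");
    // hdfs://localhost:42193 is the namenode URI that appears in the warnings above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42193"), conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Ask the NameNode to begin lease recovery, then poll isFileClosed() until the file
      // is closed. Both calls run DFSClient.checkOpen() first, so once the client has been
      // closed they throw java.io.IOException: Filesystem closed -- the failure the retry
      // loop above keeps logging, roughly once per second.
      boolean recovered = dfs.recoverLease(wal);
      while (!recovered && !dfs.isFileClosed(wal)) {
        Thread.sleep(1000L);
        recovered = dfs.recoverLease(wal);
      }
    }
    fs.close();
  }
}
```

Lease recovery can only succeed against a live client; once the filesystem is closed the loop can never observe isFileClosed() returning true, which is why the same stack trace recurs at 00:36:08, 00:36:09 and so on below.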
2024-11-08T00:36:07,710 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7af40624c00ea502246dccaba597f89c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:36:07,710 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,710 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:07,710 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,710 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,712 INFO [StoreOpener-7af40624c00ea502246dccaba597f89c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,713 INFO [StoreOpener-7af40624c00ea502246dccaba597f89c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7af40624c00ea502246dccaba597f89c columnFamilyName info 2024-11-08T00:36:07,713 DEBUG [StoreOpener-7af40624c00ea502246dccaba597f89c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:07,714 INFO [StoreOpener-7af40624c00ea502246dccaba597f89c-1 {}] regionserver.HStore(327): Store=7af40624c00ea502246dccaba597f89c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:36:07,714 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,715 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,715 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,715 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,715 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,717 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,719 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:36:07,720 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7af40624c00ea502246dccaba597f89c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750084, jitterRate=-0.046219587326049805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:36:07,720 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:07,721 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7af40624c00ea502246dccaba597f89c: Running coprocessor pre-open hook at 1731026167710Writing region info on filesystem at 1731026167710Initializing all the Stores at 1731026167711 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026167711Cleaning up temporary data from old regions at 1731026167715 (+4 ms)Running coprocessor post-open hooks at 1731026167720 (+5 ms)Region opened successfully at 1731026167721 (+1 ms) 2024-11-08T00:36:07,722 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c., pid=6, masterSystemTime=1731026167706 2024-11-08T00:36:07,725 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:07,725 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:07,726 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7af40624c00ea502246dccaba597f89c, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,45577,1731026165616 2024-11-08T00:36:07,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7af40624c00ea502246dccaba597f89c, server=3302f0f507bd,45577,1731026165616 because future has completed 2024-11-08T00:36:07,733 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T00:36:07,733 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7af40624c00ea502246dccaba597f89c, server=3302f0f507bd,45577,1731026165616 in 178 msec 2024-11-08T00:36:07,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T00:36:07,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7af40624c00ea502246dccaba597f89c, ASSIGN in 338 msec 2024-11-08T00:36:07,736 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:36:07,737 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026167736"}]},"ts":"1731026167736"} 2024-11-08T00:36:07,738 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-08T00:36:07,739 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:36:07,742 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 375 msec 2024-11-08T00:36:08,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:08,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:09,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:09,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:10,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:10,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:11,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:11,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:11,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-08T00:36:11,966 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-08T00:36:11,968 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-08T00:36:11,968 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-08T00:36:11,969 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:36:11,969 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-08T00:36:11,969 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-08T00:36:11,969 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-08T00:36:12,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:12,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:12,742 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:36:12,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:12,787 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-08T00:36:12,788 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-08T00:36:13,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:13,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:14,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:14,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:15,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:15,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:16,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:16,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:17,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39533 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:36:17,374 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-08T00:36:17,374 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-08T00:36:17,380 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-08T00:36:17,380 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:17,384 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c., hostname=3302f0f507bd,45577,1731026165616, seqNum=2] 2024-11-08T00:36:17,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:17,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:18,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:18,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:19,387 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 2024-11-08T00:36:19,388 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:19,388 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:42655,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:19,389 WARN [DataStreamer for file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 block BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42655,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK], DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42655,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]) is bad. 2024-11-08T00:36:19,389 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:42655,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:36:19,389 WARN [DataStreamer for file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 block BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK], DatanodeInfoWithStorage[127.0.0.1:42655,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42655,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]) is bad. 2024-11-08T00:36:19,389 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-896153742_22 at /127.0.0.1:36176 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36176 dst: /127.0.0.1:42655 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:36:19,389 WARN [PacketResponder: BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42655] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,390 WARN [DataStreamer for file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta block BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK], DatanodeInfoWithStorage[127.0.0.1:42655,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42655,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]) is bad. 2024-11-08T00:36:19,390 WARN [PacketResponder: BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42655] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-896153742_22 at /127.0.0.1:57838 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57838 dst: /127.0.0.1:43193 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:57864 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57864 dst: /127.0.0.1:43193 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:57876 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57876 dst: /127.0.0.1:43193 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:36214 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36214 dst: /127.0.0.1:42655 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:36216 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36216 dst: /127.0.0.1:42655 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,436 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358657480-172.17.0.3-1731026163290 (Datanode Uuid 44ed9313-edd8-4d80-8a5d-c2753b809252) service to localhost/127.0.0.1:34225 2024-11-08T00:36:19,467 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data3/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:19,468 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data4/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:19,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@bd6b006{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:19,469 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@277eda4d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:36:19,469 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:36:19,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54dbaae8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:36:19,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e7f214b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,STOPPED} 2024-11-08T00:36:19,470 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:36:19,478 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:19,482 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:19,483 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:19,483 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:19,483 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:36:19,484 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e793ffb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:19,484 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6299f50b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:19,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a382d25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/java.io.tmpdir/jetty-localhost-41145-hadoop-hdfs-3_4_1-tests_jar-_-any-12072504713111322986/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:19,594 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59462733{HTTP/1.1, (http/1.1)}{localhost:41145} 2024-11-08T00:36:19,594 INFO [Time-limited test {}] server.Server(415): Started @173692ms 2024-11-08T00:36:19,596 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:36:19,623 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:19,623 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:19,623 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:19,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:34504 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34504 dst: /127.0.0.1:43193 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:36:19,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:34502 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34502 dst: /127.0.0.1:43193 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-896153742_22 at /127.0.0.1:34514 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43193:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34514 dst: /127.0.0.1:43193 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:19,625 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3192c1d0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:19,626 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@89ebf29{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:36:19,626 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:36:19,626 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@123783d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:36:19,626 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60043bb8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,STOPPED} 2024-11-08T00:36:19,627 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:36:19,627 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:36:19,627 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358657480-172.17.0.3-1731026163290 (Datanode Uuid 55ce6167-532a-40a5-9fd7-373da715fc23) service to localhost/127.0.0.1:34225 2024-11-08T00:36:19,627 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:36:19,628 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data1/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:19,628 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data2/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:19,628 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:36:19,635 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:19,638 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:19,639 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:19,639 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:19,639 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:36:19,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58a9274b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:19,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35936f2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:19,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:19,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:19,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5091fc79{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/java.io.tmpdir/jetty-localhost-40957-hadoop-hdfs-3_4_1-tests_jar-_-any-9350829166345514082/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:19,754 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a362569{HTTP/1.1, (http/1.1)}{localhost:40957} 2024-11-08T00:36:19,754 INFO [Time-limited test {}] server.Server(415): Started @173851ms 2024-11-08T00:36:19,755 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:36:20,306 WARN [Thread-1330 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T00:36:20,309 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7c7bb7530d63b0a with lease ID 0xa60536ce372f037: from storage DS-d33fbd53-d237-460d-9d3e-81af0606845b node DatanodeRegistration(127.0.0.1:42409, datanodeUuid=44ed9313-edd8-4d80-8a5d-c2753b809252, infoPort=33385, infoSecurePort=0, ipcPort=34783, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:20,309 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe7c7bb7530d63b0a with lease ID 0xa60536ce372f037: from storage DS-3af8b3ce-cfd2-47b9-9084-f54afdc69111 node DatanodeRegistration(127.0.0.1:42409, datanodeUuid=44ed9313-edd8-4d80-8a5d-c2753b809252, infoPort=33385, infoSecurePort=0, ipcPort=34783, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:20,376 WARN [Thread-1350 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:36:20,378 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdadc59a7d2e62fe6 with lease ID 0xa60536ce372f038: from storage DS-45f9592c-a91b-4f06-84fe-85833bab1282 node DatanodeRegistration(127.0.0.1:35565, datanodeUuid=55ce6167-532a-40a5-9fd7-373da715fc23, infoPort=45929, infoSecurePort=0, ipcPort=36305, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:20,378 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdadc59a7d2e62fe6 with lease ID 0xa60536ce372f038: from storage DS-0d8848cf-4113-44a6-a763-ac3543e62810 node DatanodeRegistration(127.0.0.1:35565, datanodeUuid=55ce6167-532a-40a5-9fd7-373da715fc23, infoPort=45929, infoSecurePort=0, ipcPort=36305, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:20,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:20,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:20,776 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-08T00:36:20,779 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-08T00:36:20,781 ERROR [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458-prefix:3302f0f507bd,45577,1731026165616 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:36:20,782 WARN [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458-prefix:3302f0f507bd,45577,1731026165616 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:20,782 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C45577%2C1731026165616:(num 1731026166750) roll requested 2024-11-08T00:36:20,782 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C45577%2C1731026165616.1731026180782 2024-11-08T00:36:20,789 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 newFile=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 2024-11-08T00:36:20,790 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:20,790 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:20,790 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:20,790 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:20,790 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:20,791 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 2024-11-08T00:36:20,791 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:36:20,791 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:20,791 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 2024-11-08T00:36:20,792 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45929:45929),(127.0.0.1/127.0.0.1:33385:33385)] 2024-11-08T00:36:20,792 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 is not closed yet, will try archiving it next time 2024-11-08T00:36:20,792 WARN [IPC Server handler 3 on default port 34225 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-08T00:36:20,792 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 after 1ms 2024-11-08T00:36:21,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:21,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:22,309 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-08T00:36:22,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:22,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:22,795 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-08T00:36:23,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:23,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:24,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:24,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:24,793 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 after 4002ms 2024-11-08T00:36:24,798 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:24,799 WARN [DataStreamer for file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 block BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35565,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK], DatanodeInfoWithStorage[127.0.0.1:42409,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35565,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]) is bad. 
2024-11-08T00:36:24,799 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:49124 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42409:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49124 dst: /127.0.0.1:42409 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:24,799 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:41460 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35565:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41460 dst: /127.0.0.1:35565 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:24,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5091fc79{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:24,835 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a362569{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:36:24,836 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:36:24,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35936f2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:36:24,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58a9274b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,STOPPED} 2024-11-08T00:36:24,837 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:36:24,837 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:36:24,837 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:36:24,837 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358657480-172.17.0.3-1731026163290 (Datanode Uuid 55ce6167-532a-40a5-9fd7-373da715fc23) service to localhost/127.0.0.1:34225 2024-11-08T00:36:24,838 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data1/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:24,838 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data2/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:24,838 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:36:24,858 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:24,862 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:24,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:24,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:24,863 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:36:24,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68df0564{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:24,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5268e5d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:24,976 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b547078{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/java.io.tmpdir/jetty-localhost-37877-hadoop-hdfs-3_4_1-tests_jar-_-any-17124070272086144541/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:24,976 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@424d5648{HTTP/1.1, (http/1.1)}{localhost:37877} 2024-11-08T00:36:24,976 INFO [Time-limited test {}] server.Server(415): Started @179073ms 2024-11-08T00:36:24,978 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:36:25,017 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:25,017 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1853582111_22 at /127.0.0.1:59370 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42409:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59370 dst: /127.0.0.1:42409 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:25,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a382d25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:25,019 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59462733{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:36:25,020 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:36:25,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6299f50b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:36:25,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e793ffb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,STOPPED} 2024-11-08T00:36:25,021 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:36:25,021 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:36:25,021 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:36:25,021 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358657480-172.17.0.3-1731026163290 (Datanode Uuid 44ed9313-edd8-4d80-8a5d-c2753b809252) service to localhost/127.0.0.1:34225 2024-11-08T00:36:25,022 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data3/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:25,022 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data4/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:36:25,022 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:36:25,030 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:25,035 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:25,037 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:25,037 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:25,037 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:36:25,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@523d3401{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:25,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71ef12de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:25,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c19f11a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/java.io.tmpdir/jetty-localhost-36709-hadoop-hdfs-3_4_1-tests_jar-_-any-11161166699669055486/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:25,142 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75d9c3d8{HTTP/1.1, (http/1.1)}{localhost:36709} 2024-11-08T00:36:25,142 INFO [Time-limited test {}] server.Server(415): Started @179240ms 2024-11-08T00:36:25,144 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:36:25,613 WARN [Thread-1404 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:36:25,615 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccddd70415415faf with lease ID 0xa60536ce372f039: from storage DS-45f9592c-a91b-4f06-84fe-85833bab1282 node DatanodeRegistration(127.0.0.1:38603, datanodeUuid=55ce6167-532a-40a5-9fd7-373da715fc23, infoPort=38161, infoSecurePort=0, ipcPort=43629, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:25,615 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccddd70415415faf with lease ID 0xa60536ce372f039: from storage DS-0d8848cf-4113-44a6-a763-ac3543e62810 node DatanodeRegistration(127.0.0.1:38603, datanodeUuid=55ce6167-532a-40a5-9fd7-373da715fc23, infoPort=38161, infoSecurePort=0, ipcPort=43629, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:25,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:25,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:25,693 WARN [Thread-1424 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:36:25,696 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11459032e333fbf8 with lease ID 0xa60536ce372f03a: from storage DS-d33fbd53-d237-460d-9d3e-81af0606845b node DatanodeRegistration(127.0.0.1:46657, datanodeUuid=44ed9313-edd8-4d80-8a5d-c2753b809252, infoPort=42849, infoSecurePort=0, ipcPort=37549, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:25,696 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11459032e333fbf8 with lease ID 0xa60536ce372f03a: from storage DS-3af8b3ce-cfd2-47b9-9084-f54afdc69111 node DatanodeRegistration(127.0.0.1:46657, datanodeUuid=44ed9313-edd8-4d80-8a5d-c2753b809252, infoPort=42849, infoSecurePort=0, ipcPort=37549, storageInfo=lv=-57;cid=testClusterID;nsid=1907776356;c=1731026163290), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:26,166 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-08T00:36:26,169 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-08T00:36:26,171 ERROR [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458-prefix:3302f0f507bd,45577,1731026165616 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42409,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:26,171 WARN [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458-prefix:3302f0f507bd,45577,1731026165616 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42409,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:36:26,171 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C45577%2C1731026165616:(num 1731026180782) roll requested 2024-11-08T00:36:26,172 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C45577%2C1731026165616.1731026186171 2024-11-08T00:36:26,179 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 newFile=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 2024-11-08T00:36:26,179 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:26,179 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:26,179 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:26,179 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:26,179 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:26,180 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 2024-11-08T00:36:26,180 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42409,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:26,180 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42409,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:36:26,180 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 2024-11-08T00:36:26,180 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42849:42849),(127.0.0.1/127.0.0.1:38161:38161)] 2024-11-08T00:36:26,180 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 is not closed yet, will try archiving it next time 2024-11-08T00:36:26,180 WARN [IPC Server handler 4 on default port 34225 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-08T00:36:26,181 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 after 1ms 2024-11-08T00:36:26,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:26,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:27,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:27,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:28,182 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:28,197 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 newFile=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:28,197 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:28,200 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:28,200 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:28,200 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:28,200 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:28,200 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:28,201 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42849:42849),(127.0.0.1/127.0.0.1:38161:38161)] 2024-11-08T00:36:28,201 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 is not closed yet, will try archiving it next time 2024-11-08T00:36:28,201 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 is not closed yet, will try archiving it next time 2024-11-08T00:36:28,202 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 2024-11-08T00:36:28,202 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 2024-11-08T00:36:28,202 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 after 0ms 2024-11-08T00:36:28,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741838_1019 (size=1264) 2024-11-08T00:36:28,202 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 2024-11-08T00:36:28,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741838_1019 (size=1264) 2024-11-08T00:36:28,203 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 is not closed yet, will try archiving it next time 2024-11-08T00:36:28,212 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731026167721/Put/vlen=218/seqid=0] 2024-11-08T00:36:28,212 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731026177386/Put/vlen=1045/seqid=0] 2024-11-08T00:36:28,212 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026166750 2024-11-08T00:36:28,212 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 2024-11-08T00:36:28,212 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 2024-11-08T00:36:28,213 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 after 0ms 2024-11-08T00:36:28,213 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 2024-11-08T00:36:28,216 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731026180781/Put/vlen=1045/seqid=0] 2024-11-08T00:36:28,216 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731026182796/Put/vlen=1045/seqid=0] 2024-11-08T00:36:28,216 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 2024-11-08T00:36:28,216 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 2024-11-08T00:36:28,216 INFO [Time-limited test {}] 
util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 2024-11-08T00:36:28,217 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 after 1ms 2024-11-08T00:36:28,217 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026186171 2024-11-08T00:36:28,220 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731026186170/Put/vlen=1045/seqid=0] 2024-11-08T00:36:28,220 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:28,220 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:28,221 WARN [IPC Server handler 0 on default port 34225 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-08T00:36:28,221 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 after 1ms 2024-11-08T00:36:28,615 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-08T00:36:28,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:28,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:28,696 WARN [ResponseProcessor for block BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:28,696 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-896153742_22 at /127.0.0.1:34650 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34650 dst: /127.0.0.1:46657 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:46657 remote=/127.0.0.1:34650]. Total timeout mills is 60000, 59500 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:36:28,696 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-896153742_22 at /127.0.0.1:39596 [Receiving block BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39596 dst: /127.0.0.1:38603 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:36:28,697 WARN [DataStreamer for file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 block BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46657,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK], DatanodeInfoWithStorage[127.0.0.1:38603,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46657,DS-d33fbd53-d237-460d-9d3e-81af0606845b,DISK]) is bad. 2024-11-08T00:36:28,697 WARN [DataStreamer for file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 block BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:28,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741839_1022 (size=85) 2024-11-08T00:36:28,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741839_1022 (size=85) 2024-11-08T00:36:29,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:29,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:30,182 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026180782 after 4002ms 2024-11-08T00:36:30,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:30,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:31,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:31,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:32,222 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 after 4002ms 2024-11-08T00:36:32,222 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:32,228 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:32,229 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-08T00:36:32,230 ERROR [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458-prefix:3302f0f507bd,45577,1731026165616.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:32,230 WARN [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458-prefix:3302f0f507bd,45577,1731026165616.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:32,230 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C45577%2C1731026165616.meta:.meta(num 1731026167229) roll requested 2024-11-08T00:36:32,230 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C45577%2C1731026165616.meta.1731026192230.meta 2024-11-08T00:36:32,236 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,236 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,236 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,236 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,236 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026192230.meta 2024-11-08T00:36:32,236 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:32,237 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:32,237 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta 2024-11-08T00:36:32,237 WARN [IPC Server handler 1 on default port 34225 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-08T00:36:32,237 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta after 0ms 2024-11-08T00:36:32,240 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38161:38161),(127.0.0.1/127.0.0.1:42849:42849)] 2024-11-08T00:36:32,240 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta is not closed yet, will try archiving it next time 2024-11-08T00:36:32,258 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/info/242e16a92cd84bfa9feb547e10286b35 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c./info:regioninfo/1731026167726/Put/seqid=0 2024-11-08T00:36:32,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741841_1025 (size=7125) 2024-11-08T00:36:32,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741841_1025 (size=7125) 2024-11-08T00:36:32,268 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/info/242e16a92cd84bfa9feb547e10286b35 2024-11-08T00:36:32,289 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/ns/27f066be1c8d479489331d91fa633290 is 43, key is default/ns:d/1731026167317/Put/seqid=0 2024-11-08T00:36:32,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38603 is added to blk_1073741842_1026 (size=5153) 2024-11-08T00:36:32,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741842_1026 (size=5153) 2024-11-08T00:36:32,294 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/ns/27f066be1c8d479489331d91fa633290 2024-11-08T00:36:32,320 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/table/1b521a2d38574d479e9e7fbd390933ce is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731026167736/Put/seqid=0 2024-11-08T00:36:32,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741843_1027 (size=5438) 2024-11-08T00:36:32,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741843_1027 (size=5438) 2024-11-08T00:36:32,325 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/table/1b521a2d38574d479e9e7fbd390933ce 2024-11-08T00:36:32,331 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/info/242e16a92cd84bfa9feb547e10286b35 as hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/info/242e16a92cd84bfa9feb547e10286b35 2024-11-08T00:36:32,338 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/info/242e16a92cd84bfa9feb547e10286b35, entries=10, sequenceid=11, filesize=7.0 K 2024-11-08T00:36:32,339 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/ns/27f066be1c8d479489331d91fa633290 as hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/ns/27f066be1c8d479489331d91fa633290 2024-11-08T00:36:32,345 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/ns/27f066be1c8d479489331d91fa633290, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T00:36:32,346 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/.tmp/table/1b521a2d38574d479e9e7fbd390933ce as hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/table/1b521a2d38574d479e9e7fbd390933ce 2024-11-08T00:36:32,352 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/table/1b521a2d38574d479e9e7fbd390933ce, entries=2, sequenceid=11, filesize=5.3 K 2024-11-08T00:36:32,353 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 124ms, sequenceid=11, compaction requested=false 2024-11-08T00:36:32,353 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-08T00:36:32,353 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7af40624c00ea502246dccaba597f89c 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-08T00:36:32,353 ERROR [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458-prefix:3302f0f507bd,45577,1731026165616 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:32,354 WARN [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458-prefix:3302f0f507bd,45577,1731026165616 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:32,354 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C45577%2C1731026165616:(num 1731026188182) roll requested 2024-11-08T00:36:32,354 INFO [regionserver/3302f0f507bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C45577%2C1731026165616.1731026192354 2024-11-08T00:36:32,359 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 newFile=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026192354 2024-11-08T00:36:32,359 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,359 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,359 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,359 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,359 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,359 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026192354 2024-11-08T00:36:32,360 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:32,360 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1358657480-172.17.0.3-1731026163290:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:36:32,360 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:32,361 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 after 1ms 2024-11-08T00:36:32,364 DEBUG [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38161:38161),(127.0.0.1/127.0.0.1:42849:42849)] 2024-11-08T00:36:32,364 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.1731026188182 to hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/oldWALs/3302f0f507bd%2C45577%2C1731026165616.1731026188182 2024-11-08T00:36:32,379 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c/.tmp/info/384017bdaf5441cd89d4ee3a1348d541 is 1080, key is row1002/info:/1731026177386/Put/seqid=0 2024-11-08T00:36:32,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741845_1029 (size=9270) 2024-11-08T00:36:32,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741845_1029 (size=9270) 2024-11-08T00:36:32,384 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c/.tmp/info/384017bdaf5441cd89d4ee3a1348d541 2024-11-08T00:36:32,391 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c/.tmp/info/384017bdaf5441cd89d4ee3a1348d541 as hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c/info/384017bdaf5441cd89d4ee3a1348d541 2024-11-08T00:36:32,397 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c/info/384017bdaf5441cd89d4ee3a1348d541, entries=4, sequenceid=8, filesize=9.1 K 2024-11-08T00:36:32,398 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 7af40624c00ea502246dccaba597f89c in 45ms, sequenceid=8, compaction requested=false 2024-11-08T00:36:32,398 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
7af40624c00ea502246dccaba597f89c: 2024-11-08T00:36:32,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T00:36:32,403 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T00:36:32,404 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:36:32,404 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:36:32,404 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-08T00:36:32,404 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T00:36:32,404 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T00:36:32,404 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=434108217, stopped=false 2024-11-08T00:36:32,404 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3302f0f507bd,39533,1731026165454 2024-11-08T00:36:32,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:36:32,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:36:32,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:32,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:32,496 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:36:32,497 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:36:32,497 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-08T00:36:32,498 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:36:32,498 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:36:32,498 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:36:32,498 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3302f0f507bd,45577,1731026165616' ***** 2024-11-08T00:36:32,498 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:36:32,499 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:36:32,499 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:36:32,499 INFO [RS:0;3302f0f507bd:45577 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:36:32,499 INFO [RS:0;3302f0f507bd:45577 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:36:32,499 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(3091): Received CLOSE for 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:32,499 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(959): stopping server 3302f0f507bd,45577,1731026165616 2024-11-08T00:36:32,500 INFO [RS:0;3302f0f507bd:45577 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:36:32,500 INFO [RS:0;3302f0f507bd:45577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3302f0f507bd:45577. 2024-11-08T00:36:32,500 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7af40624c00ea502246dccaba597f89c, disabling compactions & flushes 2024-11-08T00:36:32,500 DEBUG [RS:0;3302f0f507bd:45577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:36:32,500 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 
2024-11-08T00:36:32,500 DEBUG [RS:0;3302f0f507bd:45577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:36:32,500 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:32,500 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. after waiting 0 ms 2024-11-08T00:36:32,500 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:32,500 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T00:36:32,500 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:36:32,500 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T00:36:32,500 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T00:36:32,500 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-08T00:36:32,500 DEBUG [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 7af40624c00ea502246dccaba597f89c=TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c.} 2024-11-08T00:36:32,500 DEBUG [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7af40624c00ea502246dccaba597f89c 2024-11-08T00:36:32,501 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:36:32,501 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:36:32,501 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:36:32,501 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:36:32,501 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:36:32,504 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/default/TestLogRolling-testLogRollOnPipelineRestart/7af40624c00ea502246dccaba597f89c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-08T00:36:32,505 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T00:36:32,505 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:36:32,505 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:32,505 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:36:32,505 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7af40624c00ea502246dccaba597f89c: Waiting for close lock at 1731026192500Running coprocessor pre-close hooks at 1731026192500Disabling compacts and flushes for region at 1731026192500Disabling writes for close at 1731026192500Writing region close event to WAL at 1731026192501 (+1 ms)Running coprocessor post-close hooks at 1731026192505 (+4 ms)Closed at 1731026192505 2024-11-08T00:36:32,505 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026192500Running coprocessor pre-close hooks at 1731026192500Disabling compacts and flushes for region at 1731026192500Disabling writes for close at 1731026192501 (+1 ms)Writing region close event to WAL at 1731026192502 (+1 ms)Running coprocessor post-close hooks at 1731026192505 (+3 ms)Closed at 1731026192505 2024-11-08T00:36:32,506 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T00:36:32,506 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731026167365.7af40624c00ea502246dccaba597f89c. 2024-11-08T00:36:32,616 INFO [regionserver/3302f0f507bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:36:32,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:32,669 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-08T00:36:32,669 INFO [regionserver/3302f0f507bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-08T00:36:32,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:32,701 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(976): stopping server 3302f0f507bd,45577,1731026165616; all regions closed. 2024-11-08T00:36:32,701 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,701 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,701 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,701 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,701 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:32,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741840_1023 (size=825) 2024-11-08T00:36:32,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741840_1023 (size=825) 2024-11-08T00:36:33,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:33,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:34,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:34,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:34,695 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-08T00:36:35,436 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-08T00:36:35,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:35,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:36,238 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta after 4001ms 2024-11-08T00:36:36,239 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/WALs/3302f0f507bd,45577,1731026165616/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta to hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/oldWALs/3302f0f507bd%2C45577%2C1731026165616.meta.1731026167229.meta 2024-11-08T00:36:36,241 DEBUG [RS:0;3302f0f507bd:45577 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/oldWALs 2024-11-08T00:36:36,241 INFO [RS:0;3302f0f507bd:45577 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C45577%2C1731026165616.meta:.meta(num 1731026192230) 2024-11-08T00:36:36,242 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,242 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,242 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,242 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,242 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741844_1028 (size=1162) 2024-11-08T00:36:36,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741844_1028 (size=1162) 2024-11-08T00:36:36,250 DEBUG [RS:0;3302f0f507bd:45577 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/oldWALs 2024-11-08T00:36:36,250 INFO [RS:0;3302f0f507bd:45577 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C45577%2C1731026165616:(num 1731026192354) 2024-11-08T00:36:36,251 DEBUG [RS:0;3302f0f507bd:45577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:36:36,251 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:36:36,251 INFO [RS:0;3302f0f507bd:45577 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:36:36,251 INFO [RS:0;3302f0f507bd:45577 {}] hbase.ChoreService(370): Chore service for: regionserver/3302f0f507bd:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T00:36:36,251 INFO [RS:0;3302f0f507bd:45577 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:36:36,251 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
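The Close-WAL-Writer-0 warnings above repeat because lease recovery on an old WAL keeps probing isFileClosed through a DFSClient that has already been shut down ("Filesystem closed"), while the live cluster's WAL lease is recovered on attempt=1 after roughly 4 seconds. A rough sketch of the retry-with-backoff pattern those lines reflect, using the public DistributedFileSystem API rather than HBase's reflective RecoverLeaseFSUtils helper (pause and timeout values are illustrative):

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /**
   * Polls until the NameNode reports the WAL file's lease as recovered, or
   * the deadline passes. Mirrors the retry behaviour visible in the
   * RecoverLeaseFSUtils log lines above.
   */
  public static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal,
      long timeoutMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // Ask the NameNode to start lease recovery on the WAL file.
    if (dfs.recoverLease(wal)) {
      return true; // recovered immediately (attempt=0)
    }
    while (System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L); // back off between attempts
      // If the filesystem has already been shut down, this throws
      // IOException("Filesystem closed") -- the repeated WARNs above.
      if (dfs.isFileClosed(wal) || dfs.recoverLease(wal)) {
        return true;
      }
    }
    return false;
  }
}
```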
2024-11-08T00:36:36,252 INFO [RS:0;3302f0f507bd:45577 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45577
2024-11-08T00:36:36,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-08T00:36:36,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3302f0f507bd,45577,1731026165616
2024-11-08T00:36:36,296 INFO [RS:0;3302f0f507bd:45577 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-08T00:36:36,306 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3302f0f507bd,45577,1731026165616]
2024-11-08T00:36:36,317 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3302f0f507bd,45577,1731026165616 already deleted, retry=false
2024-11-08T00:36:36,317 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3302f0f507bd,45577,1731026165616 expired; onlineServers=0
2024-11-08T00:36:36,317 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3302f0f507bd,39533,1731026165454' *****
2024-11-08T00:36:36,317 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-08T00:36:36,317 INFO [M:0;3302f0f507bd:39533 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-08T00:36:36,317 INFO [M:0;3302f0f507bd:39533 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-08T00:36:36,317 DEBUG [M:0;3302f0f507bd:39533 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-08T00:36:36,317 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
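The NodeDeleted event on /hbase/rs/3302f0f507bd,45577,1731026165616 is how the master learns the region server is gone: the server's ephemeral znode vanishes when its ZooKeeper session closes, and the RegionServerTracker's watch fires. A bare-bones sketch of that watch pattern with the plain ZooKeeper client (connection string and paths are illustrative; this is not the RegionServerTracker implementation):

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class RsZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Session watcher ignored for brevity; the log's quorum was 127.0.0.1:60383.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    Watcher rsWatcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // An ephemeral child of /hbase/rs disappearing is what the master
        // treats as "RegionServer ephemeral node deleted, processing expiration".
        if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged
            || event.getType() == Watcher.Event.EventType.NodeDeleted) {
          System.out.println("change under " + event.getPath());
        }
      }
    };
    // One-shot watch: fires on the next change below /hbase/rs.
    List<String> servers = zk.getChildren("/hbase/rs", rsWatcher);
    System.out.println("online region servers: " + servers);
  }
}
```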
2024-11-08T00:36:36,317 DEBUG [M:0;3302f0f507bd:39533 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T00:36:36,317 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026166531 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026166531,5,FailOnTimeoutGroup] 2024-11-08T00:36:36,317 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026166531 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026166531,5,FailOnTimeoutGroup] 2024-11-08T00:36:36,318 INFO [M:0;3302f0f507bd:39533 {}] hbase.ChoreService(370): Chore service for: master/3302f0f507bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T00:36:36,318 INFO [M:0;3302f0f507bd:39533 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:36:36,318 DEBUG [M:0;3302f0f507bd:39533 {}] master.HMaster(1795): Stopping service threads 2024-11-08T00:36:36,318 INFO [M:0;3302f0f507bd:39533 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T00:36:36,318 INFO [M:0;3302f0f507bd:39533 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:36:36,318 INFO [M:0;3302f0f507bd:39533 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T00:36:36,318 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T00:36:36,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T00:36:36,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:36,338 DEBUG [M:0;3302f0f507bd:39533 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-08T00:36:36,338 DEBUG [M:0;3302f0f507bd:39533 {}] master.ActiveMasterManager(353): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-08T00:36:36,338 INFO [M:0;3302f0f507bd:39533 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/.lastflushedseqids 2024-11-08T00:36:36,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741846_1030 (size=111) 2024-11-08T00:36:36,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741846_1030 (size=111) 2024-11-08T00:36:36,345 INFO [M:0;3302f0f507bd:39533 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T00:36:36,346 INFO [M:0;3302f0f507bd:39533 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T00:36:36,346 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
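The "Time limited wait for close lock" and "Acquired close lock ... after 0 ms" lines describe a bounded lock acquisition: in-flight operations on the region hold a shared lock, and close must take the exclusive side within a time limit before updates are disabled. A generic sketch of that pattern in plain java.util.concurrent (not HBase's HRegion internals):

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public final class CloseLockSketch {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();

  /**
   * Attempts the exclusive close lock with a deadline, roughly the shape of
   * the "Acquired close lock ... after N ms" messages above. Callers doing
   * reads or writes would hold closeLock.readLock() for their duration.
   */
  public boolean acquireCloseLock(long timeoutMs) throws InterruptedException {
    long start = System.currentTimeMillis();
    boolean acquired = closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS);
    if (acquired) {
      System.out.println("Acquired close lock after "
          + (System.currentTimeMillis() - start) + " ms");
    }
    return acquired;
  }
}
```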
2024-11-08T00:36:36,346 INFO [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:36,346 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:36,346 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:36:36,346 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:36,346 INFO [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-08T00:36:36,347 ERROR [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData-prefix:3302f0f507bd,39533,1731026165454 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:36,347 WARN [FSHLog-0-hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData-prefix:3302f0f507bd,39533,1731026165454 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
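"All datanodes ... are bad. Aborting..." means the old writer's HDFS pipeline is unrecoverable, so the only option is to abandon that output stream and continue on a fresh WAL file, which is what the roll request that follows does. A simplified sketch of that abandon-and-roll reaction using the generic Hadoop FileSystem API (not the FSHLog implementation; file naming is illustrative):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class RollOnPipelineFailureSketch {
  private final FileSystem fs;
  private final Path walDir;
  private FSDataOutputStream out;
  private int generation;

  public RollOnPipelineFailureSketch(FileSystem fs, Path walDir) throws IOException {
    this.fs = fs;
    this.walDir = walDir;
    this.out = fs.create(new Path(walDir, "wal." + generation));
  }

  /** Appends one record, rolling to a new file if the datanode pipeline died. */
  public void append(byte[] record) throws IOException {
    try {
      out.write(record);
      out.hflush(); // push the edit through the datanode pipeline
    } catch (IOException pipelineDead) {
      // The old stream is unusable ("All datanodes ... are bad"); abandon it
      // and continue on a fresh file, mirroring the WAL roll in the log.
      generation++;
      out = fs.create(new Path(walDir, "wal." + generation));
      out.write(record);
      out.hflush();
    }
  }
}
```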
2024-11-08T00:36:36,347 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3302f0f507bd%2C39533%2C1731026165454:(num 1731026166160) roll requested 2024-11-08T00:36:36,347 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C39533%2C1731026165454.1731026196347 2024-11-08T00:36:36,354 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,354 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,354 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,354 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,354 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,354 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026196347 2024-11-08T00:36:36,355 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-08T00:36:36,355 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43193,DS-45f9592c-a91b-4f06-84fe-85833bab1282,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-08T00:36:36,355 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 2024-11-08T00:36:36,355 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42849:42849),(127.0.0.1/127.0.0.1:38161:38161)] 2024-11-08T00:36:36,355 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 is not closed yet, will try archiving it next time 2024-11-08T00:36:36,356 WARN [IPC Server handler 1 on default port 34225 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-08T00:36:36,356 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 after 1ms 2024-11-08T00:36:36,372 DEBUG [M:0;3302f0f507bd:39533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2cc735ea6c404973b040faa60e3b6676 is 82, key is hbase:meta,,1/info:regioninfo/1731026167259/Put/seqid=0 2024-11-08T00:36:36,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741848_1033 (size=5672) 2024-11-08T00:36:36,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741848_1033 (size=5672) 2024-11-08T00:36:36,386 INFO [M:0;3302f0f507bd:39533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2cc735ea6c404973b040faa60e3b6676 2024-11-08T00:36:36,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:36:36,406 INFO [RS:0;3302f0f507bd:45577 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:36:36,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45577-0x10117df1a690001, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:36:36,406 INFO [RS:0;3302f0f507bd:45577 {}] regionserver.HRegionServer(1031): Exiting; stopping=3302f0f507bd,45577,1731026165616; zookeeper connection closed. 
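The master-store flush above writes each store file under the region's .tmp directory first; the "Committing ... .tmp/info/... as .../info/..." lines that follow then move it into the column-family directory, so a reader never observes a partially written file. A stripped-down sketch of that write-to-tmp-then-rename pattern on a Hadoop FileSystem (the payload and naming stand in for the real HFile writer):

```java
import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpThenCommitSketch {
  /**
   * Writes data under the region's .tmp directory and only then renames it
   * into the column-family directory, mirroring the ".tmp/info/<uuid>" ->
   * "info/<uuid>" moves logged above.
   */
  public static Path commitFlushedFile(FileSystem fs, Path regionDir,
      String family, byte[] payload) throws IOException {
    String name = UUID.randomUUID().toString().replace("-", "");
    Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), name);
    try (FSDataOutputStream out = fs.create(tmpFile)) {
      out.write(payload); // stand-in for writing a real HFile
    }
    Path committed = new Path(new Path(regionDir, family), name);
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + committed);
    }
    return committed;
  }
}
```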
2024-11-08T00:36:36,407 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4fbcc1c2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4fbcc1c2 2024-11-08T00:36:36,407 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-08T00:36:36,408 DEBUG [M:0;3302f0f507bd:39533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/219c4fa9e4a34df48adcdc54c7af0dad is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731026167741/Put/seqid=0 2024-11-08T00:36:36,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741849_1034 (size=6119) 2024-11-08T00:36:36,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741849_1034 (size=6119) 2024-11-08T00:36:36,414 INFO [M:0;3302f0f507bd:39533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/219c4fa9e4a34df48adcdc54c7af0dad 2024-11-08T00:36:36,434 DEBUG [M:0;3302f0f507bd:39533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3a6122459e39400b9f57d26be45e09d2 is 69, key is 3302f0f507bd,45577,1731026165616/rs:state/1731026166593/Put/seqid=0 2024-11-08T00:36:36,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741850_1035 (size=5156) 2024-11-08T00:36:36,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741850_1035 (size=5156) 2024-11-08T00:36:36,439 INFO [M:0;3302f0f507bd:39533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3a6122459e39400b9f57d26be45e09d2 2024-11-08T00:36:36,460 DEBUG [M:0;3302f0f507bd:39533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc00fa604c0349f0ae23b45cc4deb7f0 is 52, key is load_balancer_on/state:d/1731026167359/Put/seqid=0 2024-11-08T00:36:36,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741851_1036 (size=5056) 2024-11-08T00:36:36,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741851_1036 (size=5056) 2024-11-08T00:36:36,465 INFO [M:0;3302f0f507bd:39533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc00fa604c0349f0ae23b45cc4deb7f0 2024-11-08T00:36:36,471 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2cc735ea6c404973b040faa60e3b6676 as hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2cc735ea6c404973b040faa60e3b6676 2024-11-08T00:36:36,477 INFO [M:0;3302f0f507bd:39533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2cc735ea6c404973b040faa60e3b6676, entries=8, sequenceid=56, filesize=5.5 K 2024-11-08T00:36:36,478 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/219c4fa9e4a34df48adcdc54c7af0dad as hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/219c4fa9e4a34df48adcdc54c7af0dad 2024-11-08T00:36:36,483 INFO [M:0;3302f0f507bd:39533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/219c4fa9e4a34df48adcdc54c7af0dad, entries=6, sequenceid=56, filesize=6.0 K 2024-11-08T00:36:36,484 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3a6122459e39400b9f57d26be45e09d2 as hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3a6122459e39400b9f57d26be45e09d2 2024-11-08T00:36:36,490 INFO [M:0;3302f0f507bd:39533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3a6122459e39400b9f57d26be45e09d2, entries=1, sequenceid=56, filesize=5.0 K 2024-11-08T00:36:36,491 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc00fa604c0349f0ae23b45cc4deb7f0 as hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cc00fa604c0349f0ae23b45cc4deb7f0 2024-11-08T00:36:36,497 INFO [M:0;3302f0f507bd:39533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cc00fa604c0349f0ae23b45cc4deb7f0, entries=1, sequenceid=56, filesize=4.9 K 2024-11-08T00:36:36,498 INFO [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(3140): Finished flush of dataSize 
~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=56, compaction requested=false 2024-11-08T00:36:36,500 INFO [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:36,500 DEBUG [M:0;3302f0f507bd:39533 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026196346Disabling compacts and flushes for region at 1731026196346Disabling writes for close at 1731026196346Obtaining lock to block concurrent updates at 1731026196346Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731026196346Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731026196347 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731026196356 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731026196356Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731026196371 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731026196371Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731026196392 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731026196408 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731026196408Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731026196419 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731026196433 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731026196433Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731026196444 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731026196459 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731026196459Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fade04: reopening flushed file at 1731026196470 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@420f2df7: reopening flushed file at 1731026196477 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3238cd2a: reopening flushed file at 1731026196484 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d1bc16f: reopening flushed file at 1731026196490 (+6 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=56, compaction requested=false at 1731026196498 (+8 ms)Writing region close event to WAL at 1731026196500 (+2 ms)Closed at 1731026196500 2024-11-08T00:36:36,500 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,500 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,501 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,501 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,501 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:36:36,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38603 is added to blk_1073741847_1031 (size=757) 2024-11-08T00:36:36,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46657 is added to blk_1073741847_1031 (size=757) 2024-11-08T00:36:36,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:36,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:37,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,529 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,529 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,529 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,529 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,529 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,530 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,536 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:37,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:37,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:37,695 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-08T00:36:38,045 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:36:38,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,073 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,077 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:36:38,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:38,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:39,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:39,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-08T00:36:40,357 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 after 4002ms
2024-11-08T00:36:40,357 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/WALs/3302f0f507bd,39533,1731026165454/3302f0f507bd%2C39533%2C1731026165454.1731026166160 to hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/oldWALs/3302f0f507bd%2C39533%2C1731026165454.1731026166160
2024-11-08T00:36:40,360 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/MasterData/oldWALs/3302f0f507bd%2C39533%2C1731026165454.1731026166160 to hdfs://localhost:34225/user/jenkins/test-data/a76a474e-2f29-615f-1108-03e596125458/oldWALs/3302f0f507bd%2C39533%2C1731026165454.1731026166160$masterlocalwal$
2024-11-08T00:36:40,360 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-08T00:36:40,360 INFO [M:0;3302f0f507bd:39533 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-08T00:36:40,360 INFO [M:0;3302f0f507bd:39533 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39533
2024-11-08T00:36:40,361 INFO [M:0;3302f0f507bd:39533 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-08T00:36:40,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-08T00:36:40,480 INFO [M:0;3302f0f507bd:39533 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-08T00:36:40,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39533-0x10117df1a690000, quorum=127.0.0.1:60383, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-08T00:36:40,483 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c19f11a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T00:36:40,484 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75d9c3d8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T00:36:40,484 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T00:36:40,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71ef12de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T00:36:40,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@523d3401{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,STOPPED}
2024-11-08T00:36:40,486 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-08T00:36:40,486 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-08T00:36:40,487 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358657480-172.17.0.3-1731026163290 (Datanode Uuid 44ed9313-edd8-4d80-8a5d-c2753b809252) service to localhost/127.0.0.1:34225
2024-11-08T00:36:40,487 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-08T00:36:40,488 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data3/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T00:36:40,488 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data4/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T00:36:40,488 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-08T00:36:40,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b547078{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-08T00:36:40,491 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@424d5648{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T00:36:40,491 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T00:36:40,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5268e5d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T00:36:40,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68df0564{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,STOPPED}
2024-11-08T00:36:40,492 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-08T00:36:40,492 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-08T00:36:40,492 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-08T00:36:40,492 WARN [BP-1358657480-172.17.0.3-1731026163290 heartbeating to localhost/127.0.0.1:34225 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358657480-172.17.0.3-1731026163290 (Datanode Uuid 55ce6167-532a-40a5-9fd7-373da715fc23) service to localhost/127.0.0.1:34225
2024-11-08T00:36:40,493 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data1/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T00:36:40,493 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/cluster_bb62c712-dc1b-a57d-173a-c6e86e8d7958/data/data2/current/BP-1358657480-172.17.0.3-1731026163290 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-08T00:36:40,493 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-08T00:36:40,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4360f0f4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-08T00:36:40,498 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bc294e4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-08T00:36:40,498 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-08T00:36:40,498 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61f9169f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-08T00:36:40,499 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c07fc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir/,STOPPED}
2024-11-08T00:36:40,505 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-08T00:36:40,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-08T00:36:40,531 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=183 (was 157) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34225 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34225 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34225 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34225 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34225 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34225 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34225 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34225 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=224 (was 190) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6136 (was 6320)
2024-11-08T00:36:40,539 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=183, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=224, ProcessCount=11, AvailableMemoryMB=6138
2024-11-08T00:36:40,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-08T00:36:40,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.log.dir so I do NOT create it in target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a
2024-11-08T00:36:40,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5199574-9c10-014b-fdba-002f0e9cd2f6/hadoop.tmp.dir so I do NOT create it in target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a
2024-11-08T00:36:40,539 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e, deleteOnExit=true
2024-11-08T00:36:40,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/test.cache.data in system properties and HBase conf
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.tmp.dir in system properties and HBase conf
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.log.dir in system properties and HBase conf
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-08T00:36:40,540 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-08T00:36:40,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/nfs.dump.dir in system properties and HBase conf 2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/java.io.tmpdir in system properties and HBase conf 2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T00:36:40,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T00:36:40,555 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:36:40,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:40,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:40,933 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:40,938 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:40,940 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:40,940 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:40,940 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:36:40,940 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:40,941 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6da95783{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:40,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@df163d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:41,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30d9f702{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/java.io.tmpdir/jetty-localhost-35971-hadoop-hdfs-3_4_1-tests_jar-_-any-17975642273899890932/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:36:41,060 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d483d07{HTTP/1.1, (http/1.1)}{localhost:35971} 2024-11-08T00:36:41,060 INFO [Time-limited test {}] server.Server(415): Started @195157ms 2024-11-08T00:36:41,075 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:36:41,336 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:41,340 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:41,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:41,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:41,341 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:36:41,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@278dab99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:41,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e6bebf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:41,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e4582a5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/java.io.tmpdir/jetty-localhost-33113-hadoop-hdfs-3_4_1-tests_jar-_-any-5649218287362450153/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:41,455 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37d74326{HTTP/1.1, (http/1.1)}{localhost:33113} 2024-11-08T00:36:41,455 INFO [Time-limited test {}] server.Server(415): Started @195552ms 2024-11-08T00:36:41,456 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:36:41,485 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:36:41,488 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:36:41,489 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:36:41,489 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:36:41,489 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:36:41,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7009eb0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:36:41,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65345c29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:36:41,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14b98ef8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/java.io.tmpdir/jetty-localhost-45229-hadoop-hdfs-3_4_1-tests_jar-_-any-8446100091572878627/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:36:41,594 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fc2e521{HTTP/1.1, (http/1.1)}{localhost:45229} 2024-11-08T00:36:41,594 INFO [Time-limited test {}] server.Server(415): Started @195691ms 2024-11-08T00:36:41,595 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:36:41,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:41,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:41,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:36:41,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-08T00:36:41,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-08T00:36:41,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-08T00:36:42,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:42,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:42,855 WARN [Thread-1644 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/data/data1/current/BP-1032823788-172.17.0.3-1731026200566/current, will proceed with Du for space computation calculation, 2024-11-08T00:36:42,855 WARN [Thread-1645 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/data/data2/current/BP-1032823788-172.17.0.3-1731026200566/current, will proceed with Du for space computation calculation, 2024-11-08T00:36:42,873 WARN [Thread-1608 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:36:42,876 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xce17ae61c6fe2185 with lease ID 0x5380eb0892d31882: Processing first storage report for DS-20b85e0c-7df0-42d1-b293-10a5a46d606b from datanode DatanodeRegistration(127.0.0.1:46139, datanodeUuid=41142ddb-f7bc-418d-a666-f5f8dbafdba5, infoPort=33855, infoSecurePort=0, ipcPort=34295, storageInfo=lv=-57;cid=testClusterID;nsid=1826903936;c=1731026200566) 2024-11-08T00:36:42,876 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xce17ae61c6fe2185 with lease ID 0x5380eb0892d31882: from storage DS-20b85e0c-7df0-42d1-b293-10a5a46d606b node DatanodeRegistration(127.0.0.1:46139, datanodeUuid=41142ddb-f7bc-418d-a666-f5f8dbafdba5, infoPort=33855, infoSecurePort=0, ipcPort=34295, storageInfo=lv=-57;cid=testClusterID;nsid=1826903936;c=1731026200566), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:42,876 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xce17ae61c6fe2185 with lease ID 0x5380eb0892d31882: Processing first storage report for DS-fa66e195-b6f9-4b5f-b399-902dde94d9f0 from datanode DatanodeRegistration(127.0.0.1:46139, datanodeUuid=41142ddb-f7bc-418d-a666-f5f8dbafdba5, infoPort=33855, infoSecurePort=0, ipcPort=34295, storageInfo=lv=-57;cid=testClusterID;nsid=1826903936;c=1731026200566) 2024-11-08T00:36:42,876 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xce17ae61c6fe2185 with lease ID 0x5380eb0892d31882: from storage DS-fa66e195-b6f9-4b5f-b399-902dde94d9f0 node DatanodeRegistration(127.0.0.1:46139, datanodeUuid=41142ddb-f7bc-418d-a666-f5f8dbafdba5, infoPort=33855, infoSecurePort=0, ipcPort=34295, storageInfo=lv=-57;cid=testClusterID;nsid=1826903936;c=1731026200566), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:42,993 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/data/data3/current/BP-1032823788-172.17.0.3-1731026200566/current, will proceed with Du for space computation calculation, 2024-11-08T00:36:42,993 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/data/data4/current/BP-1032823788-172.17.0.3-1731026200566/current, will proceed with Du for space computation calculation, 2024-11-08T00:36:43,012 WARN [Thread-1631 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:36:43,017 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41863418b1df891a with lease ID 0x5380eb0892d31883: Processing first storage report for DS-a2b1234f-c7c7-4b96-a693-c3fbe31be6de from datanode DatanodeRegistration(127.0.0.1:43893, datanodeUuid=b59bfa08-3364-48c9-b627-1f6ae932366c, infoPort=32831, infoSecurePort=0, ipcPort=46181, storageInfo=lv=-57;cid=testClusterID;nsid=1826903936;c=1731026200566) 2024-11-08T00:36:43,017 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41863418b1df891a with lease ID 0x5380eb0892d31883: from storage DS-a2b1234f-c7c7-4b96-a693-c3fbe31be6de node DatanodeRegistration(127.0.0.1:43893, datanodeUuid=b59bfa08-3364-48c9-b627-1f6ae932366c, infoPort=32831, infoSecurePort=0, ipcPort=46181, storageInfo=lv=-57;cid=testClusterID;nsid=1826903936;c=1731026200566), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:36:43,017 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41863418b1df891a with lease ID 0x5380eb0892d31883: Processing first storage report for DS-fccc7276-744d-487b-a4b4-9f6ae83406f5 from datanode DatanodeRegistration(127.0.0.1:43893, datanodeUuid=b59bfa08-3364-48c9-b627-1f6ae932366c, infoPort=32831, infoSecurePort=0, ipcPort=46181, storageInfo=lv=-57;cid=testClusterID;nsid=1826903936;c=1731026200566) 2024-11-08T00:36:43,017 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41863418b1df891a with lease ID 0x5380eb0892d31883: from storage DS-fccc7276-744d-487b-a4b4-9f6ae83406f5 node DatanodeRegistration(127.0.0.1:43893, datanodeUuid=b59bfa08-3364-48c9-b627-1f6ae932366c, infoPort=32831, infoSecurePort=0, ipcPort=46181, storageInfo=lv=-57;cid=testClusterID;nsid=1826903936;c=1731026200566), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T00:36:43,027 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a 2024-11-08T00:36:43,030 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/zookeeper_0, clientPort=56642, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T00:36:43,031 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56642 2024-11-08T00:36:43,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:43,034 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:43,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:36:43,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:36:43,044 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b with version=8 2024-11-08T00:36:43,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase-staging 2024-11-08T00:36:43,046 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:36:43,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:43,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:43,047 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:36:43,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:43,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:36:43,047 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T00:36:43,047 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:36:43,048 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46879 2024-11-08T00:36:43,050 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46879 connecting to ZooKeeper ensemble=127.0.0.1:56642 2024-11-08T00:36:43,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:468790x0, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:36:43,109 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46879-0x10117dfad400000 connected 2024-11-08T00:36:43,190 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:43,192 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:43,195 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:36:43,196 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b, hbase.cluster.distributed=false 2024-11-08T00:36:43,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:36:43,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46879 2024-11-08T00:36:43,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46879 2024-11-08T00:36:43,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46879 2024-11-08T00:36:43,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46879 2024-11-08T00:36:43,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46879 2024-11-08T00:36:43,220 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:36:43,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:43,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:43,220 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:36:43,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:36:43,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:36:43,221 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:36:43,221 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:36:43,222 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38463 2024-11-08T00:36:43,224 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38463 connecting to ZooKeeper ensemble=127.0.0.1:56642 2024-11-08T00:36:43,225 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:43,227 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:43,243 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384630x0, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:36:43,243 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:36:43,243 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38463-0x10117dfad400001 connected 2024-11-08T00:36:43,244 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:36:43,244 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:36:43,245 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:36:43,246 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:36:43,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38463 2024-11-08T00:36:43,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38463 2024-11-08T00:36:43,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38463 2024-11-08T00:36:43,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38463 2024-11-08T00:36:43,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38463 2024-11-08T00:36:43,260 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3302f0f507bd:46879 2024-11-08T00:36:43,260 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3302f0f507bd,46879,1731026203046 2024-11-08T00:36:43,269 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:36:43,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:36:43,270 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3302f0f507bd,46879,1731026203046 2024-11-08T00:36:43,279 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:36:43,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,280 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,280 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:36:43,280 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3302f0f507bd,46879,1731026203046 from backup master directory 2024-11-08T00:36:43,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3302f0f507bd,46879,1731026203046 2024-11-08T00:36:43,290 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:36:43,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:36:43,290 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
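The ZKWatcher entries above are the master and region server registering watches on /hbase/master and /hbase/backup-masters and reacting to NodeCreated, NodeDeleted and NodeChildrenChanged events as the active master claims its znode. A rough sketch of the same watch-and-react pattern, written against the plain ZooKeeper client rather than HBase's internal ZKUtil/ZKWatcher (the ensemble address and znode paths are from the log; the client code itself is illustrative):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Ensemble address and base znode taken from the log; everything else is a sketch.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56642", 30000, (WatchedEvent e) -> {
          // HBase's ZKWatcher reacts to the same event types logged above.
          System.out.println("event " + e.getType() + " on " + e.getPath());
        });
        // Set a watch on a znode that may not exist yet, as ZKUtil(113) does for /hbase/master.
        zk.exists("/hbase/master", true);
        // Watch the backup-masters children, as in the NodeChildrenChanged events above.
        zk.getChildren("/hbase/backup-masters", true);
        Thread.sleep(10_000);   // keep the session alive long enough to observe events
        zk.close();
      }
    }

ZooKeeper watches are one-shot, so code following this pattern has to re-register after each event, much as the repeated "Set watcher on znode" lines above suggest.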
2024-11-08T00:36:43,290 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3302f0f507bd,46879,1731026203046 2024-11-08T00:36:43,296 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/hbase.id] with ID: b052bb93-fbdb-4d54-91fc-da7e1fa5ecca 2024-11-08T00:36:43,296 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/.tmp/hbase.id 2024-11-08T00:36:43,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:36:43,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:36:43,303 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/.tmp/hbase.id]:[hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/hbase.id] 2024-11-08T00:36:43,316 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:43,317 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T00:36:43,318 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
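The FSUtils entries above create the cluster ID file by first writing it under .tmp/hbase.id and then moving it onto hbase.id, so a reader never observes a half-written ID. A hedged sketch of that write-then-rename pattern using the Hadoop FileSystem API; the namenode port and the ID value are copied from the log, while the root directory and class name are placeholders, and this is not HBase's actual FSUtils implementation:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:34977");        // namenode from the log
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/rootdir"); // illustrative root dir
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        // 1. Write the ID to a temporary file first, so readers never see a partial file.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("b052bb93-fbdb-4d54-91fc-da7e1fa5ecca".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Move it to its final location, as FSUtils(634) logs above.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
        fs.close();
      }
    }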
2024-11-08T00:36:43,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,327 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:36:43,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:36:43,338 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:36:43,339 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T00:36:43,339 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:36:43,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:36:43,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:36:43,347 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store 2024-11-08T00:36:43,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:36:43,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:36:43,354 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:43,354 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:36:43,354 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:43,354 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:43,354 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:36:43,354 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:36:43,354 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
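The long descriptor dumps above list the four column families of the master's local 'master:store' region ('info', 'proc', 'rs', 'state') together with their versions, bloom filters, encodings and block sizes. The sketch below shows how an equivalent descriptor is assembled with the public HBase client builders; the 'info' family settings mirror the logged attributes, while the table name and the trimmed-down family list are illustrative, and the 'master:store' region itself is created internally by MasterRegion rather than through this API.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' family: 3 versions, in-memory, ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, 8 KB blocks -- as in the descriptor logged above.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' in the log all use single-version, 64 KB-block defaults.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .build())
            .build();
      }
    }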
2024-11-08T00:36:43,354 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026203354Disabling compacts and flushes for region at 1731026203354Disabling writes for close at 1731026203354Writing region close event to WAL at 1731026203354Closed at 1731026203354 2024-11-08T00:36:43,355 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/.initializing 2024-11-08T00:36:43,355 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/WALs/3302f0f507bd,46879,1731026203046 2024-11-08T00:36:43,358 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C46879%2C1731026203046, suffix=, logDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/WALs/3302f0f507bd,46879,1731026203046, archiveDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/oldWALs, maxLogs=10 2024-11-08T00:36:43,358 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C46879%2C1731026203046.1731026203358 2024-11-08T00:36:43,363 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/WALs/3302f0f507bd,46879,1731026203046/3302f0f507bd%2C46879%2C1731026203046.1731026203358 2024-11-08T00:36:43,369 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32831:32831),(127.0.0.1/127.0.0.1:33855:33855)] 2024-11-08T00:36:43,370 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:36:43,370 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:43,370 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,370 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,371 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T00:36:43,373 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:43,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,375 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T00:36:43,375 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:36:43,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,377 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T00:36:43,377 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:36:43,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T00:36:43,379 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:36:43,380 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,380 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,381 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,382 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,382 DEBUG [master/3302f0f507bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,383 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:36:43,384 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:36:43,386 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:36:43,387 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797323, jitterRate=0.01384878158569336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:36:43,387 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731026203370Initializing all the Stores at 1731026203371 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026203371Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026203371Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026203371Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026203371Cleaning up temporary data from old regions at 1731026203382 (+11 ms)Region opened successfully at 1731026203387 (+5 ms) 2024-11-08T00:36:43,388 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T00:36:43,391 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e57cd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:36:43,391 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T00:36:43,392 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T00:36:43,392 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T00:36:43,392 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T00:36:43,393 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-08T00:36:43,393 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T00:36:43,393 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T00:36:43,395 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T00:36:43,396 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T00:36:43,406 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T00:36:43,406 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T00:36:43,407 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T00:36:43,416 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T00:36:43,417 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T00:36:43,418 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T00:36:43,427 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T00:36:43,429 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T00:36:43,437 DEBUG 
[master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T00:36:43,440 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T00:36:43,448 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T00:36:43,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:36:43,459 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:36:43,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,459 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,459 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3302f0f507bd,46879,1731026203046, sessionid=0x10117dfad400000, setting cluster-up flag (Was=false) 2024-11-08T00:36:43,479 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,511 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T00:36:43,512 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,46879,1731026203046 2024-11-08T00:36:43,532 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:43,564 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T00:36:43,565 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,46879,1731026203046 2024-11-08T00:36:43,566 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T00:36:43,568 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T00:36:43,568 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T00:36:43,568 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-08T00:36:43,568 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3302f0f507bd,46879,1731026203046 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T00:36:43,570 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:36:43,570 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:36:43,570 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:36:43,570 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:36:43,570 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3302f0f507bd:0, corePoolSize=10, maxPoolSize=10 2024-11-08T00:36:43,570 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,570 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:36:43,570 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-08T00:36:43,572 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731026233572 2024-11-08T00:36:43,572 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T00:36:43,572 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T00:36:43,572 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T00:36:43,572 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T00:36:43,572 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T00:36:43,572 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T00:36:43,572 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:36:43,572 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T00:36:43,572 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-08T00:36:43,573 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T00:36:43,573 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T00:36:43,573 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T00:36:43,574 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,574 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T00:36:43,574 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T00:36:43,574 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T00:36:43,574 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026203574,5,FailOnTimeoutGroup] 2024-11-08T00:36:43,574 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026203574,5,FailOnTimeoutGroup] 2024-11-08T00:36:43,574 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,574 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T00:36:43,574 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,574 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:36:43,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:36:43,590 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T00:36:43,591 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b 2024-11-08T00:36:43,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:36:43,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:36:43,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:43,604 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:36:43,606 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:36:43,606 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:43,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:36:43,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:36:43,608 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:43,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:36:43,609 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:36:43,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:43,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:36:43,612 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:36:43,612 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:43,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:43,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:36:43,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740 2024-11-08T00:36:43,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740 2024-11-08T00:36:43,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:36:43,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:36:43,616 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-08T00:36:43,617 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:36:43,619 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:36:43,619 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861746, jitterRate=0.0957673043012619}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:36:43,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731026203602Initializing all the Stores at 1731026203603 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026203603Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026203604 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026203604Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026203604Cleaning up temporary data from old regions at 1731026203615 (+11 ms)Region opened successfully at 1731026203620 (+5 ms) 2024-11-08T00:36:43,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:36:43,620 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:36:43,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:36:43,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:36:43,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:36:43,621 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:36:43,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026203620Disabling compacts and flushes for region at 
1731026203620Disabling writes for close at 1731026203620Writing region close event to WAL at 1731026203621 (+1 ms)Closed at 1731026203621 2024-11-08T00:36:43,622 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:36:43,623 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T00:36:43,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T00:36:43,624 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:36:43,626 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T00:36:43,650 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(746): ClusterId : b052bb93-fbdb-4d54-91fc-da7e1fa5ecca 2024-11-08T00:36:43,650 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:36:43,659 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:36:43,660 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:36:43,670 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:36:43,670 DEBUG [RS:0;3302f0f507bd:38463 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b51144e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:36:43,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:43,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:36:43,688 DEBUG [RS:0;3302f0f507bd:38463 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3302f0f507bd:38463 2024-11-08T00:36:43,688 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:36:43,688 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:36:43,688 DEBUG [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T00:36:43,688 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,46879,1731026203046 with port=38463, startcode=1731026203220 2024-11-08T00:36:43,689 DEBUG [RS:0;3302f0f507bd:38463 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:36:43,692 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57857, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:36:43,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46879 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3302f0f507bd,38463,1731026203220 2024-11-08T00:36:43,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46879 {}] master.ServerManager(517): Registering regionserver=3302f0f507bd,38463,1731026203220 2024-11-08T00:36:43,694 DEBUG [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b 2024-11-08T00:36:43,694 DEBUG [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34977 2024-11-08T00:36:43,694 DEBUG [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:36:43,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:36:43,706 DEBUG [RS:0;3302f0f507bd:38463 {}] zookeeper.ZKUtil(111): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3302f0f507bd,38463,1731026203220 2024-11-08T00:36:43,706 WARN [RS:0;3302f0f507bd:38463 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T00:36:43,707 INFO [RS:0;3302f0f507bd:38463 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:36:43,707 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3302f0f507bd,38463,1731026203220] 2024-11-08T00:36:43,707 DEBUG [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220 2024-11-08T00:36:43,711 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:36:43,713 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:36:43,713 INFO [RS:0;3302f0f507bd:38463 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:36:43,713 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,714 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:36:43,714 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:36:43,714 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3302f0f507bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:36:43,715 DEBUG [RS:0;3302f0f507bd:38463 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:36:43,716 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,716 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,716 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,716 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,716 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,716 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,38463,1731026203220-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:36:43,731 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:36:43,732 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,38463,1731026203220-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,732 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:43,732 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.Replication(171): 3302f0f507bd,38463,1731026203220 started 2024-11-08T00:36:43,747 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:36:43,747 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(1482): Serving as 3302f0f507bd,38463,1731026203220, RpcServer on 3302f0f507bd/172.17.0.3:38463, sessionid=0x10117dfad400001 2024-11-08T00:36:43,747 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:36:43,747 DEBUG [RS:0;3302f0f507bd:38463 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3302f0f507bd,38463,1731026203220 2024-11-08T00:36:43,747 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,38463,1731026203220' 2024-11-08T00:36:43,747 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:36:43,748 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:36:43,748 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:36:43,748 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:36:43,748 DEBUG [RS:0;3302f0f507bd:38463 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3302f0f507bd,38463,1731026203220 2024-11-08T00:36:43,748 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,38463,1731026203220' 2024-11-08T00:36:43,748 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:36:43,749 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:36:43,749 DEBUG [RS:0;3302f0f507bd:38463 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:36:43,749 INFO [RS:0;3302f0f507bd:38463 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T00:36:43,749 INFO [RS:0;3302f0f507bd:38463 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T00:36:43,776 WARN [3302f0f507bd:46879 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-08T00:36:43,851 INFO [RS:0;3302f0f507bd:38463 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C38463%2C1731026203220, suffix=, logDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220, archiveDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/oldWALs, maxLogs=32 2024-11-08T00:36:43,851 INFO [RS:0;3302f0f507bd:38463 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C38463%2C1731026203220.1731026203851 2024-11-08T00:36:43,857 INFO [RS:0;3302f0f507bd:38463 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026203851 2024-11-08T00:36:43,858 DEBUG [RS:0;3302f0f507bd:38463 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32831:32831),(127.0.0.1/127.0.0.1:33855:33855)] 2024-11-08T00:36:44,026 DEBUG [3302f0f507bd:46879 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-08T00:36:44,027 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3302f0f507bd,38463,1731026203220 2024-11-08T00:36:44,029 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,38463,1731026203220, state=OPENING 2024-11-08T00:36:44,037 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T00:36:44,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:44,048 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:36:44,049 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:36:44,049 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:36:44,049 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:36:44,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,38463,1731026203220}] 2024-11-08T00:36:44,203 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:36:44,205 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43945, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:36:44,209 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T00:36:44,209 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:36:44,211 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C38463%2C1731026203220.meta, suffix=.meta, logDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220, archiveDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/oldWALs, maxLogs=32 2024-11-08T00:36:44,211 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C38463%2C1731026203220.meta.1731026204211.meta 2024-11-08T00:36:44,216 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.meta.1731026204211.meta 2024-11-08T00:36:44,217 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33855:33855),(127.0.0.1/127.0.0.1:32831:32831)] 2024-11-08T00:36:44,218 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:36:44,219 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T00:36:44,219 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T00:36:44,219 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
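Both WAL instances created above report blocksize=256 MB, rollsize=128 MB and maxLogs=32. The roll size is the point at which the current WAL writer is closed and a new file started, and it is derived from the WAL block size times a roll multiplier. The sketch below reconstructs that arithmetic under the assumption that hbase.regionserver.hlog.blocksize and hbase.regionserver.logroll.multiplier are the governing properties and that the fallbacks shown (twice the filesystem block size, multiplier 0.5) match this cluster; treat both as assumptions rather than documented defaults.

    import org.apache.hadoop.conf.Configuration;

    public class WalRollSizeSketch {
      // Rough reconstruction of how the logged roll size is obtained
      // (blocksize 256 MB * 0.5 = rollsize 128 MB). Not the real AbstractFSWAL code.
      static long rollSize(Configuration conf, long fsDefaultBlockSize) {
        long walBlockSize = conf.getLong("hbase.regionserver.hlog.blocksize",
            2 * fsDefaultBlockSize);                                  // assumed fallback
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        return (long) (walBlockSize * multiplier);
      }
    }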
2024-11-08T00:36:44,219 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T00:36:44,219 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:44,219 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T00:36:44,219 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T00:36:44,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:36:44,222 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:36:44,222 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:44,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:44,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:36:44,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:36:44,224 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:44,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:44,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:36:44,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:36:44,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:44,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:36:44,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:36:44,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:36:44,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:44,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
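Every store opened above prints the same CompactionConfiguration: files [minFilesToCompact:3, maxFilesToCompact:10), ratio 1.200000, off-peak ratio 5.000000. The ratio is the heart of minor-compaction selection: a store file is only pulled into a compaction if it is not much larger than the files it would be compacted with. The method below is a simplified illustration of that ratio test, with invented names; it is not the actual ExploringCompactionPolicy implementation.

    import java.util.List;

    public class CompactionRatioSketch {
      // A candidate file passes the ratio check when its size is at most
      // ratio * (total size of the other files in the selection). With the
      // logged ratio of 1.2 (5.0 off-peak), oversized files are left out so
      // minor compactions stay cheap; maxFilesToCompact (10 here) caps the set.
      static boolean withinRatio(long fileSize, List<Long> otherFileSizes, double ratio) {
        long sumOfOthers = 0L;
        for (long size : otherFileSizes) {
          sumOfOthers += size;
        }
        return fileSize <= ratio * sumOfOthers;
      }
    }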
2024-11-08T00:36:44,228 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:36:44,229 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740 2024-11-08T00:36:44,230 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740 2024-11-08T00:36:44,231 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:36:44,231 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:36:44,232 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-08T00:36:44,233 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:36:44,234 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766612, jitterRate=-0.025203019380569458}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:36:44,234 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T00:36:44,235 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731026204219Writing region info on filesystem at 1731026204219Initializing all the Stores at 1731026204220 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026204220Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026204221 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026204221Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026204221Cleaning up temporary data from old regions at 1731026204231 (+10 ms)Running coprocessor post-open hooks at 1731026204234 (+3 ms)Region opened successfully at 1731026204235 (+1 ms) 2024-11-08T00:36:44,236 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731026204202 2024-11-08T00:36:44,239 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T00:36:44,239 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T00:36:44,240 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,38463,1731026203220 2024-11-08T00:36:44,241 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,38463,1731026203220, state=OPEN 2024-11-08T00:36:44,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:36:44,299 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:36:44,299 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3302f0f507bd,38463,1731026203220 2024-11-08T00:36:44,299 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:36:44,299 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:36:44,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T00:36:44,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,38463,1731026203220 in 250 msec 2024-11-08T00:36:44,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T00:36:44,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 679 msec 2024-11-08T00:36:44,306 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:36:44,306 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T00:36:44,308 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:36:44,308 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,38463,1731026203220, seqNum=-1] 2024-11-08T00:36:44,309 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:36:44,310 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56713, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:36:44,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 749 msec 2024-11-08T00:36:44,318 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731026204318, completionTime=-1 2024-11-08T00:36:44,318 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-08T00:36:44,318 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T00:36:44,321 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-08T00:36:44,321 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731026264321 2024-11-08T00:36:44,321 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731026324321 2024-11-08T00:36:44,321 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-08T00:36:44,321 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46879,1731026203046-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:44,321 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46879,1731026203046-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:44,321 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46879,1731026203046-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:44,322 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3302f0f507bd:46879, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T00:36:44,322 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:44,322 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T00:36:44,324 DEBUG [master/3302f0f507bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T00:36:44,326 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.036sec 2024-11-08T00:36:44,326 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T00:36:44,327 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T00:36:44,327 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T00:36:44,327 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-08T00:36:44,327 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T00:36:44,327 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46879,1731026203046-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:36:44,327 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46879,1731026203046-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T00:36:44,329 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T00:36:44,329 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T00:36:44,330 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46879,1731026203046-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
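The ChoreService lines above register the master's periodic background tasks (BalancerChore, CatalogJanitor, HbckChore and so on), each with a name, a period and a time unit. Such a task is written by extending ScheduledChore and handing it to a ChoreService. The sketch below is a hypothetical chore; it assumes the ScheduledChore(name, stopper, period) constructor with a millisecond period and the ChoreService.scheduleChore method, so the signatures should be checked against the HBase version in use.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // Hypothetical chore that would fire every 60 s, like the
      // ClusterStatusChore entry above (period=60000, unit=MILLISECONDS).
      static class HeartbeatChore extends ScheduledChore {
        HeartbeatChore(Stoppable stopper) {
          super("ExampleHeartbeatChore", stopper, 60_000);
        }

        @Override
        protected void chore() {
          // Periodic work goes here; the real chores refresh balancer,
          // catalog-janitor and normalizer state.
        }
      }

      static void schedule(Stoppable stopper) {
        ChoreService service = new ChoreService("example");
        service.scheduleChore(new HeartbeatChore(stopper));
      }
    }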
2024-11-08T00:36:44,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@550e9c9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:36:44,351 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3302f0f507bd,46879,-1 for getting cluster id 2024-11-08T00:36:44,351 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T00:36:44,352 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b052bb93-fbdb-4d54-91fc-da7e1fa5ecca' 2024-11-08T00:36:44,353 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T00:36:44,353 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b052bb93-fbdb-4d54-91fc-da7e1fa5ecca" 2024-11-08T00:36:44,353 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2385b04f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:36:44,353 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3302f0f507bd,46879,-1] 2024-11-08T00:36:44,354 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T00:36:44,354 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:36:44,355 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36382, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T00:36:44,356 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64509b84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:36:44,357 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:36:44,358 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,38463,1731026203220, seqNum=-1] 2024-11-08T00:36:44,358 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:36:44,359 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41362, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:36:44,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3302f0f507bd,46879,1731026203046 2024-11-08T00:36:44,361 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:36:44,364 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-08T00:36:44,365 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T00:36:44,366 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 3302f0f507bd,46879,1731026203046 2024-11-08T00:36:44,366 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@33bc5d59 2024-11-08T00:36:44,366 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T00:36:44,367 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36392, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T00:36:44,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-08T00:36:44,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-08T00:36:44,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:36:44,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-08T00:36:44,371 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:36:44,371 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:44,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-08T00:36:44,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:36:44,373 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:36:44,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741835_1011 (size=405) 2024-11-08T00:36:44,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741835_1011 (size=405) 2024-11-08T00:36:44,381 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8f8f623d37240aa31c757481007843e7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b 2024-11-08T00:36:44,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741836_1012 (size=88) 2024-11-08T00:36:44,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741836_1012 (size=88) 2024-11-08T00:36:44,389 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:44,389 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 8f8f623d37240aa31c757481007843e7, disabling compactions & flushes 2024-11-08T00:36:44,389 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:36:44,389 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:36:44,389 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. after waiting 0 ms 2024-11-08T00:36:44,389 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 
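The two TableDescriptorChecker warnings above appear because the test creates 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a tiny hbase.hregion.max.filesize (786432) and memstore flush size (8192) so that flushes and log rolls happen quickly; they surface as warnings rather than errors, which suggests the table sanity checks are relaxed for the test. A client-side sketch of a comparable create-table call follows; the table and family names come from the log, but the code itself is illustrative and not the test's own code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateSmallTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
              .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE warning above
              .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE warning above
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build());
        }
      }
    }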
2024-11-08T00:36:44,389 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:36:44,389 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8f8f623d37240aa31c757481007843e7: Waiting for close lock at 1731026204389Disabling compacts and flushes for region at 1731026204389Disabling writes for close at 1731026204389Writing region close event to WAL at 1731026204389Closed at 1731026204389 2024-11-08T00:36:44,390 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:36:44,391 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731026204390"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026204390"}]},"ts":"1731026204390"} 2024-11-08T00:36:44,393 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-08T00:36:44,395 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:36:44,395 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026204395"}]},"ts":"1731026204395"} 2024-11-08T00:36:44,397 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-08T00:36:44,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8f8f623d37240aa31c757481007843e7, ASSIGN}] 2024-11-08T00:36:44,399 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8f8f623d37240aa31c757481007843e7, ASSIGN 2024-11-08T00:36:44,400 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8f8f623d37240aa31c757481007843e7, ASSIGN; state=OFFLINE, location=3302f0f507bd,38463,1731026203220; forceNewPlan=false, retain=false 2024-11-08T00:36:44,551 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8f8f623d37240aa31c757481007843e7, regionState=OPENING, regionLocation=3302f0f507bd,38463,1731026203220 2024-11-08T00:36:44,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8f8f623d37240aa31c757481007843e7, ASSIGN because future has completed 2024-11-08T00:36:44,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8f8f623d37240aa31c757481007843e7, server=3302f0f507bd,38463,1731026203220}] 2024-11-08T00:36:44,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:44,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:44,711 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 
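The Close-WAL-Writer-0 warnings above are not about this cluster's WALs: the paths point at an earlier minicluster's data directory (21101859-... on namenode port 42193), and RecoverLeaseFSUtils is still polling isFileClosed on them via reflection. Every attempt fails with "Filesystem closed" because the DFSClient behind that FileSystem has already been shut down, so the retries can never succeed. The sketch below illustrates the recover-and-poll pattern using the public DistributedFileSystem calls named in the stack trace instead of reflection; it is a simplified illustration, not the utility's real implementation or timing.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the namenode to recover the lease on a WAL file, then poll until
      // the file is reported closed or we give up. Both calls throw
      // IOException("Filesystem closed") once the DFSClient is shut down,
      // which is exactly what the repeated WARNs above show.
      static boolean recoverLease(DistributedFileSystem dfs, Path walFile, int attempts)
          throws IOException, InterruptedException {
        if (dfs.recoverLease(walFile)) {
          return true;
        }
        for (int i = 1; i < attempts; i++) {
          Thread.sleep(1000L); // the log shows roughly one retry per second
          if (dfs.isFileClosed(walFile)) {
            return true;
          }
        }
        return false;
      }
    }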
2024-11-08T00:36:44,712 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8f8f623d37240aa31c757481007843e7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:36:44,712 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,712 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:36:44,712 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,712 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,714 INFO [StoreOpener-8f8f623d37240aa31c757481007843e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,716 INFO [StoreOpener-8f8f623d37240aa31c757481007843e7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f8f623d37240aa31c757481007843e7 columnFamilyName info 2024-11-08T00:36:44,716 DEBUG [StoreOpener-8f8f623d37240aa31c757481007843e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:36:44,716 INFO [StoreOpener-8f8f623d37240aa31c757481007843e7-1 {}] regionserver.HStore(327): Store=8f8f623d37240aa31c757481007843e7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:36:44,717 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,717 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,718 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,718 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,718 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,720 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,722 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:36:44,722 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8f8f623d37240aa31c757481007843e7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809997, jitterRate=0.029964834451675415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:36:44,722 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8f8f623d37240aa31c757481007843e7 2024-11-08T00:36:44,723 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8f8f623d37240aa31c757481007843e7: Running coprocessor pre-open hook at 1731026204712Writing region info on filesystem at 1731026204712Initializing all the Stores at 1731026204713 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026204713Cleaning up temporary data from old regions at 1731026204718 (+5 ms)Running coprocessor post-open hooks at 1731026204722 (+4 ms)Region opened successfully at 1731026204723 (+1 ms) 2024-11-08T00:36:44,724 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7., pid=6, masterSystemTime=1731026204707 2024-11-08T00:36:44,726 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:36:44,726 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:36:44,727 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8f8f623d37240aa31c757481007843e7, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,38463,1731026203220 2024-11-08T00:36:44,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8f8f623d37240aa31c757481007843e7, server=3302f0f507bd,38463,1731026203220 because future has completed 2024-11-08T00:36:44,733 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T00:36:44,733 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8f8f623d37240aa31c757481007843e7, server=3302f0f507bd,38463,1731026203220 in 176 msec 2024-11-08T00:36:44,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T00:36:44,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8f8f623d37240aa31c757481007843e7, ASSIGN in 335 msec 2024-11-08T00:36:44,736 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T00:36:44,736 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026204736"}]},"ts":"1731026204736"} 2024-11-08T00:36:44,738 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-08T00:36:44,739 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T00:36:44,741 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 371 msec 2024-11-08T00:36:45,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:45,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:46,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:46,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-11-08T00:36:47,472 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-08T00:36:47,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,499 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-08T00:36:47,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:47,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:48,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:48,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:49,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:49,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:49,711 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-08T00:36:49,712 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-11-08T00:36:50,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:50,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:51,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:51,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:51,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-08T00:36:51,966 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-08T00:36:51,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-08T00:36:51,967 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-08T00:36:51,967 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-08T00:36:51,967 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-08T00:36:51,968 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-08T00:36:51,968 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-08T00:36:52,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:52,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:53,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:53,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:54,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-08T00:36:54,444 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-08T00:36:54,444 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-08T00:36:54,448 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-08T00:36:54,448 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.
2024-11-08T00:36:54,452 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7., hostname=3302f0f507bd,38463,1731026203220, seqNum=2]
2024-11-08T00:36:54,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-08T00:36:54,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-08T00:36:54,464 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-08T00:36:54,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-08T00:36:54,466 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-08T00:36:54,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-08T00:36:54,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38463 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-08T00:36:54,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.
2024-11-08T00:36:54,629 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8f8f623d37240aa31c757481007843e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-08T00:36:54,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/35f0b4631f124ef7bdf310bba3bbe49e is 1080, key is row0001/info:/1731026214453/Put/seqid=0
2024-11-08T00:36:54,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741837_1013 (size=6033)
2024-11-08T00:36:54,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741837_1013 (size=6033)
2024-11-08T00:36:54,655 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/35f0b4631f124ef7bdf310bba3bbe49e
2024-11-08T00:36:54,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/35f0b4631f124ef7bdf310bba3bbe49e as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/35f0b4631f124ef7bdf310bba3bbe49e
2024-11-08T00:36:54,671 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/35f0b4631f124ef7bdf310bba3bbe49e, entries=1, sequenceid=5, filesize=5.9 K
2024-11-08T00:36:54,673 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8f8f623d37240aa31c757481007843e7 in 44ms, sequenceid=5, compaction requested=false
2024-11-08T00:36:54,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8f8f623d37240aa31c757481007843e7:
2024-11-08T00:36:54,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.
2024-11-08T00:36:54,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-08T00:36:54,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-08T00:36:54,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-08T00:36:54,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 212 msec
2024-11-08T00:36:54,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:54,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:54,685 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 223 msec
2024-11-08T00:36:55,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:55,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:56,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:56,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
2024-11-08T00:36:57,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246
2024-11-08T00:36:57,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:58,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:58,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:59,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:36:59,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:00,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:00,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:01,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:01,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:02,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:02,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:03,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:03,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:03,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 after 68058ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T00:37:03,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta after 68042ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-08T00:37:04,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T00:37:04,534 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-08T00:37:04,536 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-08T00:37:04,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-08T00:37:04,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-08T00:37:04,539 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-08T00:37:04,541 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-08T00:37:04,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-08T00:37:04,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:04,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:04,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38463 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-08T00:37:04,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:37:04,696 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 8f8f623d37240aa31c757481007843e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-08T00:37:04,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/1bafac5e4975431ba6bd3e2d86c0f71c is 1080, key is row0002/info:/1731026224535/Put/seqid=0 2024-11-08T00:37:04,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741838_1014 (size=6033) 2024-11-08T00:37:04,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741838_1014 (size=6033) 2024-11-08T00:37:04,712 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/1bafac5e4975431ba6bd3e2d86c0f71c 2024-11-08T00:37:04,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/1bafac5e4975431ba6bd3e2d86c0f71c as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/1bafac5e4975431ba6bd3e2d86c0f71c 2024-11-08T00:37:04,726 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/1bafac5e4975431ba6bd3e2d86c0f71c, entries=1, sequenceid=9, filesize=5.9 K 2024-11-08T00:37:04,727 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8f8f623d37240aa31c757481007843e7 in 31ms, sequenceid=9, compaction requested=false 2024-11-08T00:37:04,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): 
Flush status journal for 8f8f623d37240aa31c757481007843e7: 2024-11-08T00:37:04,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:37:04,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-08T00:37:04,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-08T00:37:04,732 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-08T00:37:04,732 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 188 msec 2024-11-08T00:37:04,735 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec 2024-11-08T00:37:05,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-08T00:37:05,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-08T00:37:06,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:06,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:07,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:07,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:08,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:08,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:09,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:09,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:10,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:10,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:11,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:11,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:12,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:12,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:13,027 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-08T00:37:13,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:13,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:14,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-08T00:37:14,645 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-08T00:37:14,652 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C38463%2C1731026203220.1731026234652
2024-11-08T00:37:14,658 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:14,658 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:14,658 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:14,658 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:14,658 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:14,659 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026203851 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026234652
2024-11-08T00:37:14,659 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33855:33855),(127.0.0.1/127.0.0.1:32831:32831)]
2024-11-08T00:37:14,659 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026203851 is not closed yet, will try archiving it next time
2024-11-08T00:37:14,660 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-08T00:37:14,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741833_1009 (size=5546)
2024-11-08T00:37:14,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741833_1009 (size=5546)
2024-11-08T00:37:14,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-08T00:37:14,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-08T00:37:14,663 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-08T00:37:14,664 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-08T00:37:14,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-08T00:37:14,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:14,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:14,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38463 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-08T00:37:14,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.
2024-11-08T00:37:14,819 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 8f8f623d37240aa31c757481007843e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-08T00:37:14,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/9d2ffdbf14f943ddb8574b3e635d55ff is 1080, key is row0003/info:/1731026234648/Put/seqid=0
2024-11-08T00:37:14,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741840_1016 (size=6033)
2024-11-08T00:37:14,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741840_1016 (size=6033)
2024-11-08T00:37:14,834 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/9d2ffdbf14f943ddb8574b3e635d55ff
2024-11-08T00:37:14,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/9d2ffdbf14f943ddb8574b3e635d55ff as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/9d2ffdbf14f943ddb8574b3e635d55ff
2024-11-08T00:37:14,849 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/9d2ffdbf14f943ddb8574b3e635d55ff, entries=1, sequenceid=13, filesize=5.9 K
2024-11-08T00:37:14,850 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8f8f623d37240aa31c757481007843e7 in 32ms, sequenceid=13, compaction requested=true
2024-11-08T00:37:14,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 8f8f623d37240aa31c757481007843e7:
2024-11-08T00:37:14,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.
2024-11-08T00:37:14,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-08T00:37:14,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-08T00:37:14,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-08T00:37:14,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 188 msec
2024-11-08T00:37:14,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 197 msec
2024-11-08T00:37:15,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:15,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:16,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:16,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:17,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:17,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:18,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:18,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed ... 11 more
2024-11-08T00:37:19,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:19,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:20,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:20,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:21,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:21,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:22,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:22,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:23,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:23,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:24,330 INFO [master/3302f0f507bd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-08T00:37:24,330 INFO [master/3302f0f507bd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-08T00:37:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-08T00:37:24,685 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-08T00:37:24,685 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:37:24,688 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:37:24,688 DEBUG [Time-limited test {}] regionserver.HStore(1541): 8f8f623d37240aa31c757481007843e7/info is initiating minor compaction (all files) 2024-11-08T00:37:24,688 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:37:24,688 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-08T00:37:24,688 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 8f8f623d37240aa31c757481007843e7/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:37:24,689 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/35f0b4631f124ef7bdf310bba3bbe49e, hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/1bafac5e4975431ba6bd3e2d86c0f71c, hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/9d2ffdbf14f943ddb8574b3e635d55ff] into tmpdir=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp, totalSize=17.7 K 2024-11-08T00:37:24,690 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 35f0b4631f124ef7bdf310bba3bbe49e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731026214453 2024-11-08T00:37:24,691 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1bafac5e4975431ba6bd3e2d86c0f71c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731026224535 2024-11-08T00:37:24,692 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9d2ffdbf14f943ddb8574b3e635d55ff, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731026234648 2024-11-08T00:37:24,706 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 8f8f623d37240aa31c757481007843e7#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:37:24,707 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/30bb7735a4444d98bc2cd65a73e3a7f0 is 1080, key is row0001/info:/1731026214453/Put/seqid=0 2024-11-08T00:37:24,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741841_1017 (size=8296) 2024-11-08T00:37:24,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741841_1017 (size=8296) 2024-11-08T00:37:24,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:24,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:24,717 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/30bb7735a4444d98bc2cd65a73e3a7f0 as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/30bb7735a4444d98bc2cd65a73e3a7f0 2024-11-08T00:37:24,724 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8f8f623d37240aa31c757481007843e7/info of 8f8f623d37240aa31c757481007843e7 into 30bb7735a4444d98bc2cd65a73e3a7f0(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-08T00:37:24,725 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 8f8f623d37240aa31c757481007843e7: 2024-11-08T00:37:24,727 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C38463%2C1731026203220.1731026244727 2024-11-08T00:37:24,732 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:24,732 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:24,732 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:24,732 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:24,733 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:24,733 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026234652 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026244727 2024-11-08T00:37:24,733 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33855:33855),(127.0.0.1/127.0.0.1:32831:32831)] 2024-11-08T00:37:24,733 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026234652 is not closed yet, will try archiving it next time 2024-11-08T00:37:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741839_1015 (size=2520) 2024-11-08T00:37:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741839_1015 (size=2520) 2024-11-08T00:37:24,735 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026203851 to hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/oldWALs/3302f0f507bd%2C38463%2C1731026203220.1731026203851 2024-11-08T00:37:24,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-08T00:37:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-08T00:37:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-08T00:37:24,738 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-08T00:37:24,738 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-08T00:37:24,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-08T00:37:24,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38463 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-08T00:37:24,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 
2024-11-08T00:37:24,892 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 8f8f623d37240aa31c757481007843e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-08T00:37:24,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/3d21c72130a9495fabb1a6d68436841d is 1080, key is row0000/info:/1731026244726/Put/seqid=0 2024-11-08T00:37:24,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741843_1019 (size=6033) 2024-11-08T00:37:24,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741843_1019 (size=6033) 2024-11-08T00:37:24,906 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/3d21c72130a9495fabb1a6d68436841d 2024-11-08T00:37:24,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/3d21c72130a9495fabb1a6d68436841d as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/3d21c72130a9495fabb1a6d68436841d 2024-11-08T00:37:24,918 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/3d21c72130a9495fabb1a6d68436841d, entries=1, sequenceid=18, filesize=5.9 K 2024-11-08T00:37:24,919 INFO [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8f8f623d37240aa31c757481007843e7 in 26ms, sequenceid=18, compaction requested=false 2024-11-08T00:37:24,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 8f8f623d37240aa31c757481007843e7: 2024-11-08T00:37:24,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 
2024-11-08T00:37:24,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-08T00:37:24,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-08T00:37:24,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-08T00:37:24,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec 2024-11-08T00:37:24,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-11-08T00:37:25,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:25,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:26,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:26,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:27,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:27,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:28,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:28,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:29,712 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8f8f623d37240aa31c757481007843e7, had cached 0 bytes from a total of 14329 2024-11-08T00:37:29,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:29,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:30,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:30,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:31,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:31,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:32,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:32,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:33,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:33,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:34,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:34,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-08T00:37:34,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46879 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-08T00:37:34,795 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-08T00:37:34,800 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C38463%2C1731026203220.1731026254800
2024-11-08T00:37:34,837 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:34,837 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:34,837 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:34,838 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:34,838 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-08T00:37:34,838 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026244727 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026254800
2024-11-08T00:37:34,839 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33855:33855),(127.0.0.1/127.0.0.1:32831:32831)]
2024-11-08T00:37:34,840 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026244727 is not closed yet, will try archiving it next time
2024-11-08T00:37:34,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-08T00:37:34,840 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/WALs/3302f0f507bd,38463,1731026203220/3302f0f507bd%2C38463%2C1731026203220.1731026234652 to hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/oldWALs/3302f0f507bd%2C38463%2C1731026203220.1731026234652
2024-11-08T00:37:34,840 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
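The block of WARN entries above is a close-WAL retry loop: AbstractFSWAL.closeWriter asks RecoverLeaseFSUtils to recover the lease on the old WAL files, RecoverLeaseFSUtils probes isFileClosed on the filesystem via reflection, and every probe (roughly once per second, as the timestamps show) fails with java.lang.reflect.InvocationTargetException wrapping java.io.IOException: Filesystem closed, because the test's DFS client has already been shut down. The snippet below is a minimal sketch of that pattern, not the HBase implementation; the class name IsFileClosedProbe and its give-up behaviour are assumptions, while FileSystem, Path and the isFileClosed(Path) method are taken from the stack traces.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Minimal sketch (not the HBase code): probe FileSystem#isFileClosed reflectively,
 * the way the stack traces above suggest RecoverLeaseFSUtils does, and unwrap the
 * InvocationTargetException so the real cause ("Filesystem closed") is visible.
 */
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  /** Returns true only if the filesystem positively reports the file as closed. */
  public static boolean isFileClosed(FileSystem fs, Path path) {
    Method isFileClosed;
    try {
      // isFileClosed(Path) exists on DistributedFileSystem; a plain FileSystem may not have it,
      // which is why the probe goes through reflection in the first place.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false;
    }
    try {
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // The interesting failure is the cause, e.g. "java.io.IOException: Filesystem closed"
      // when the DFSClient behind this FileSystem instance has already been shut down;
      // in that state every retry will fail the same way.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}

Once the cause is "Filesystem closed", repeating the probe cannot succeed; the loop only ends when the caller gives up or the surrounding test shuts the WAL machinery down.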
2024-11-08T00:37:34,840 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:37:34,840 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:37:34,841 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:37:34,841 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T00:37:34,841 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1173464021, stopped=false 2024-11-08T00:37:34,841 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741842_1018 (size=2026)
2024-11-08T00:37:34,841 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3302f0f507bd,46879,1731026203046
2024-11-08T00:37:34,842 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-08T00:37:34,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741842_1018 (size=2026)
2024-11-08T00:37:34,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-08T00:37:34,881 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-08T00:37:34,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-08T00:37:34,881 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-08T00:37:34,881 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-08T00:37:34,882 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
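The ZKWatcher entries above show how shutdown is propagated: deleting the /hbase/running znode fires a NodeDeleted event on both the master and the region server watchers, and a fresh watch is then set on the now-missing znode (see the "Set watcher on znode that does not yet exist, /hbase/running" lines that follow). Below is a minimal, hypothetical sketch of that watch-and-re-arm pattern using the plain ZooKeeper client, not HBase's ZKWatcher/ZKUtil; the class name RunningNodeWatcher and the 30-second session timeout are made up, while the NodeDeleted event type and the znode-based shutdown signal come from the log.

import java.io.IOException;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

/**
 * Minimal sketch: watch a "running" znode and treat its deletion as a shutdown
 * signal, re-arming the watch each time it fires.
 */
public class RunningNodeWatcher implements Watcher {

  private final ZooKeeper zk;
  private final String runningZNode;
  private volatile boolean clusterUp = true;

  public RunningNodeWatcher(String quorum, String runningZNode) throws IOException {
    this.runningZNode = runningZNode;
    // Registers this object as the default watcher for the session.
    this.zk = new ZooKeeper(quorum, 30_000, this);
  }

  /** exists() both answers "is the cluster up?" and arms a watch on the znode. */
  public boolean checkAndWatch() throws KeeperException, InterruptedException {
    Stat stat = zk.exists(runningZNode, true);
    clusterUp = stat != null;
    return clusterUp;
  }

  @Override
  public void process(WatchedEvent event) {
    if (runningZNode.equals(event.getPath())
        && event.getType() == Event.EventType.NodeDeleted) {
      // Mirrors the log above: NodeDeleted on the running znode means shutdown was requested.
      clusterUp = false;
      try {
        // Standard ZooKeeper watches are one-shot; set a new one so we notice
        // if the znode ever reappears.
        zk.exists(runningZNode, true);
      } catch (KeeperException e) {
        // Best effort only; the sketch gives up re-arming on ZK errors.
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }

  public boolean isClusterUp() {
    return clusterUp;
  }
}

Because such watches are one-shot, the immediate re-registration after the delete event is exactly what the "Set watcher on znode that does not yet exist" lines in the log record.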
2024-11-08T00:37:34,882 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:37:34,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:37:34,882 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:37:34,883 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:37:34,883 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3302f0f507bd,38463,1731026203220' ***** 2024-11-08T00:37:34,883 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:37:34,883 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:37:34,884 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:37:34,884 INFO [RS:0;3302f0f507bd:38463 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:37:34,884 INFO [RS:0;3302f0f507bd:38463 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:37:34,884 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(3091): Received CLOSE for 8f8f623d37240aa31c757481007843e7 2024-11-08T00:37:34,885 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(959): stopping server 3302f0f507bd,38463,1731026203220 2024-11-08T00:37:34,885 INFO [RS:0;3302f0f507bd:38463 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:37:34,885 INFO [RS:0;3302f0f507bd:38463 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3302f0f507bd:38463. 2024-11-08T00:37:34,885 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8f8f623d37240aa31c757481007843e7, disabling compactions & flushes 2024-11-08T00:37:34,885 DEBUG [RS:0;3302f0f507bd:38463 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:37:34,885 DEBUG [RS:0;3302f0f507bd:38463 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:37:34,885 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:37:34,885 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:37:34,885 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T00:37:34,885 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:37:34,885 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. after waiting 0 ms 2024-11-08T00:37:34,885 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T00:37:34,885 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:37:34,885 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T00:37:34,886 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8f8f623d37240aa31c757481007843e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-08T00:37:34,886 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-08T00:37:34,886 DEBUG [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(1325): Online Regions={8f8f623d37240aa31c757481007843e7=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7., 1588230740=hbase:meta,,1.1588230740} 2024-11-08T00:37:34,886 DEBUG [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8f8f623d37240aa31c757481007843e7 2024-11-08T00:37:34,886 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:37:34,886 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:37:34,886 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:37:34,886 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:37:34,886 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:37:34,887 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-08T00:37:34,891 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/15bd95eff2cb4b0da170d767b407747b is 1080, key is row0001/info:/1731026254798/Put/seqid=0 2024-11-08T00:37:34,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741845_1021 (size=6033) 2024-11-08T00:37:34,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741845_1021 (size=6033) 2024-11-08T00:37:34,896 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/15bd95eff2cb4b0da170d767b407747b 2024-11-08T00:37:34,902 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/.tmp/info/15bd95eff2cb4b0da170d767b407747b as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/15bd95eff2cb4b0da170d767b407747b 2024-11-08T00:37:34,905 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/info/f5b135692a3c4027acfbfd09465767c3 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7./info:regioninfo/1731026204727/Put/seqid=0 2024-11-08T00:37:34,908 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/15bd95eff2cb4b0da170d767b407747b, entries=1, sequenceid=22, filesize=5.9 K 2024-11-08T00:37:34,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741846_1022 (size=7308) 2024-11-08T00:37:34,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741846_1022 (size=7308) 2024-11-08T00:37:34,909 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8f8f623d37240aa31c757481007843e7 in 24ms, sequenceid=22, compaction requested=true 2024-11-08T00:37:34,909 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/info/f5b135692a3c4027acfbfd09465767c3 2024-11-08T00:37:34,910 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/35f0b4631f124ef7bdf310bba3bbe49e, hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/1bafac5e4975431ba6bd3e2d86c0f71c, hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/9d2ffdbf14f943ddb8574b3e635d55ff] to archive 2024-11-08T00:37:34,910 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-08T00:37:34,912 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/35f0b4631f124ef7bdf310bba3bbe49e to hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/35f0b4631f124ef7bdf310bba3bbe49e 2024-11-08T00:37:34,913 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/1bafac5e4975431ba6bd3e2d86c0f71c to hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/1bafac5e4975431ba6bd3e2d86c0f71c 2024-11-08T00:37:34,914 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/9d2ffdbf14f943ddb8574b3e635d55ff to hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/info/9d2ffdbf14f943ddb8574b3e635d55ff 2024-11-08T00:37:34,914 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3302f0f507bd:46879 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-08T00:37:34,914 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [35f0b4631f124ef7bdf310bba3bbe49e=6033, 1bafac5e4975431ba6bd3e2d86c0f71c=6033, 9d2ffdbf14f943ddb8574b3e635d55ff=6033] 2024-11-08T00:37:34,918 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8f8f623d37240aa31c757481007843e7/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-08T00:37:34,918 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 2024-11-08T00:37:34,918 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8f8f623d37240aa31c757481007843e7: Waiting for close lock at 1731026254885Running coprocessor pre-close hooks at 1731026254885Disabling compacts and flushes for region at 1731026254885Disabling writes for close at 1731026254885Obtaining lock to block concurrent updates at 1731026254886 (+1 ms)Preparing flush snapshotting stores in 8f8f623d37240aa31c757481007843e7 at 1731026254886Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731026254886Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. at 1731026254887 (+1 ms)Flushing 8f8f623d37240aa31c757481007843e7/info: creating writer at 1731026254887Flushing 8f8f623d37240aa31c757481007843e7/info: appending metadata at 1731026254890 (+3 ms)Flushing 8f8f623d37240aa31c757481007843e7/info: closing flushed file at 1731026254890Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ef1d636: reopening flushed file at 1731026254901 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8f8f623d37240aa31c757481007843e7 in 24ms, sequenceid=22, compaction requested=true at 1731026254909 (+8 ms)Writing region close event to WAL at 1731026254915 (+6 ms)Running coprocessor post-close hooks at 1731026254918 (+3 ms)Closed at 1731026254918 2024-11-08T00:37:34,919 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731026204368.8f8f623d37240aa31c757481007843e7. 
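The close sequence above ends with the region's single 'info' family flushed (~1.05 KB at sequenceid=22) and the region closed cleanly. For context only, the kind of client-side workload that produces such a flush can be sketched with the stock HBase client API; this is an illustration, assuming an hbase-site.xml on the classpath, with the table name and row key copied from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // write one ~1 KB cell into the 'info' family, comparable to the row0001 cell seen above
          Put put = new Put(Bytes.toBytes("row0001"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
          table.put(put);
          // ask the region server to flush the memstore to a new HFile
          admin.flush(tn);
        }
      }
    }

Repeating such a put-and-flush cycle is roughly what accumulates the small HFiles that are later compacted and archived, as the StoreCloser entries above show.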
2024-11-08T00:37:34,927 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/ns/234673152ee14d7e8b98d09a5cc4a746 is 43, key is default/ns:d/1731026204311/Put/seqid=0 2024-11-08T00:37:34,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741847_1023 (size=5153) 2024-11-08T00:37:34,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741847_1023 (size=5153) 2024-11-08T00:37:34,932 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/ns/234673152ee14d7e8b98d09a5cc4a746 2024-11-08T00:37:34,957 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/table/6e4065fff2294119805a19aaae75f793 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731026204736/Put/seqid=0 2024-11-08T00:37:34,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741848_1024 (size=5508) 2024-11-08T00:37:34,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741848_1024 (size=5508) 2024-11-08T00:37:34,962 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/table/6e4065fff2294119805a19aaae75f793 2024-11-08T00:37:34,967 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/info/f5b135692a3c4027acfbfd09465767c3 as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/info/f5b135692a3c4027acfbfd09465767c3 2024-11-08T00:37:34,972 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/info/f5b135692a3c4027acfbfd09465767c3, entries=10, sequenceid=11, filesize=7.1 K 2024-11-08T00:37:34,973 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/ns/234673152ee14d7e8b98d09a5cc4a746 as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/ns/234673152ee14d7e8b98d09a5cc4a746 2024-11-08T00:37:34,978 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/ns/234673152ee14d7e8b98d09a5cc4a746, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T00:37:34,979 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/.tmp/table/6e4065fff2294119805a19aaae75f793 as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/table/6e4065fff2294119805a19aaae75f793 2024-11-08T00:37:34,985 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/table/6e4065fff2294119805a19aaae75f793, entries=2, sequenceid=11, filesize=5.4 K 2024-11-08T00:37:34,986 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-08T00:37:34,991 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T00:37:34,991 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:37:34,991 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:37:34,991 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026254886Running coprocessor pre-close hooks at 1731026254886Disabling compacts and flushes for region at 1731026254886Disabling writes for close at 1731026254886Obtaining lock to block concurrent updates at 1731026254887 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731026254887Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731026254887Flushing stores of hbase:meta,,1.1588230740 at 1731026254887Flushing 1588230740/info: creating writer at 1731026254888 (+1 ms)Flushing 1588230740/info: appending metadata at 1731026254905 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731026254905Flushing 1588230740/ns: creating writer at 1731026254914 (+9 ms)Flushing 1588230740/ns: appending metadata at 1731026254927 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731026254927Flushing 1588230740/table: creating writer at 1731026254936 (+9 ms)Flushing 1588230740/table: appending metadata at 1731026254957 (+21 ms)Flushing 1588230740/table: closing flushed file at 1731026254957Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@621cb625: reopening flushed file at 1731026254967 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63a76596: reopening flushed file at 1731026254972 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d4fbdac: reopening flushed file at 1731026254979 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false at 1731026254986 (+7 ms)Writing region close event to WAL at 1731026254987 (+1 ms)Running coprocessor post-close hooks at 1731026254991 (+4 ms)Closed at 1731026254991 2024-11-08T00:37:34,992 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T00:37:35,086 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.HRegionServer(976): stopping server 3302f0f507bd,38463,1731026203220; all regions closed. 2024-11-08T00:37:35,087 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,087 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,087 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,088 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,088 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741834_1010 (size=3306) 2024-11-08T00:37:35,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741834_1010 (size=3306) 2024-11-08T00:37:35,096 DEBUG [RS:0;3302f0f507bd:38463 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/oldWALs 2024-11-08T00:37:35,096 INFO [RS:0;3302f0f507bd:38463 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C38463%2C1731026203220.meta:.meta(num 1731026204211) 2024-11-08T00:37:35,096 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,097 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,097 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,098 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741844_1020 (size=1252) 2024-11-08T00:37:35,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741844_1020 (size=1252) 2024-11-08T00:37:35,103 DEBUG [RS:0;3302f0f507bd:38463 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/oldWALs 2024-11-08T00:37:35,103 INFO [RS:0;3302f0f507bd:38463 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C38463%2C1731026203220:(num 1731026254800) 2024-11-08T00:37:35,103 DEBUG [RS:0;3302f0f507bd:38463 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:37:35,103 INFO [RS:0;3302f0f507bd:38463 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:37:35,103 INFO [RS:0;3302f0f507bd:38463 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:37:35,103 INFO [RS:0;3302f0f507bd:38463 {}] hbase.ChoreService(370): Chore service for: regionserver/3302f0f507bd:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-08T00:37:35,103 INFO [RS:0;3302f0f507bd:38463 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:37:35,103 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T00:37:35,104 INFO [RS:0;3302f0f507bd:38463 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38463 2024-11-08T00:37:35,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:37:35,124 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3302f0f507bd,38463,1731026203220 2024-11-08T00:37:35,124 INFO [RS:0;3302f0f507bd:38463 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:37:35,135 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3302f0f507bd,38463,1731026203220] 2024-11-08T00:37:35,145 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3302f0f507bd,38463,1731026203220 already deleted, retry=false 2024-11-08T00:37:35,145 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3302f0f507bd,38463,1731026203220 expired; onlineServers=0 2024-11-08T00:37:35,145 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3302f0f507bd,46879,1731026203046' ***** 2024-11-08T00:37:35,145 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T00:37:35,145 INFO [M:0;3302f0f507bd:46879 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:37:35,146 INFO [M:0;3302f0f507bd:46879 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:37:35,146 DEBUG [M:0;3302f0f507bd:46879 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T00:37:35,146 DEBUG [M:0;3302f0f507bd:46879 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T00:37:35,146 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
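The master learns that the region server is gone through ZooKeeper: the ephemeral znode under /hbase/rs is deleted, and RegionServerTracker processes the expiration before initiating master shutdown. A minimal stand-alone sketch of watching that znode with the plain ZooKeeper client (quorum address and znode path copied from the log, purely for illustration):

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsWatcher {
      public static void main(String[] args) throws Exception {
        // quorum taken from the log lines above; a real deployment uses its configured quorum
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56642", 30_000, event -> {});
        Watcher watcher = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            if (event.getType() == Event.EventType.NodeChildrenChanged) {
              System.out.println("live region server set changed under " + event.getPath());
            }
          }
        };
        // list the ephemeral znodes for live region servers and arm a one-shot watch on them
        List<String> servers = zk.getChildren("/hbase/rs", watcher);
        System.out.println("currently registered: " + servers);
        zk.close();
      }
    }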
2024-11-08T00:37:35,146 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026203574 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026203574,5,FailOnTimeoutGroup] 2024-11-08T00:37:35,146 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026203574 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026203574,5,FailOnTimeoutGroup] 2024-11-08T00:37:35,146 INFO [M:0;3302f0f507bd:46879 {}] hbase.ChoreService(370): Chore service for: master/3302f0f507bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T00:37:35,146 INFO [M:0;3302f0f507bd:46879 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:37:35,147 DEBUG [M:0;3302f0f507bd:46879 {}] master.HMaster(1795): Stopping service threads 2024-11-08T00:37:35,147 INFO [M:0;3302f0f507bd:46879 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T00:37:35,147 INFO [M:0;3302f0f507bd:46879 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:37:35,147 INFO [M:0;3302f0f507bd:46879 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T00:37:35,147 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T00:37:35,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T00:37:35,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:35,156 DEBUG [M:0;3302f0f507bd:46879 {}] zookeeper.ZKUtil(347): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T00:37:35,156 WARN [M:0;3302f0f507bd:46879 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T00:37:35,157 INFO [M:0;3302f0f507bd:46879 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/.lastflushedseqids 2024-11-08T00:37:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741849_1025 (size=130) 2024-11-08T00:37:35,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741849_1025 (size=130) 2024-11-08T00:37:35,170 INFO [M:0;3302f0f507bd:46879 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T00:37:35,170 INFO [M:0;3302f0f507bd:46879 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T00:37:35,171 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:37:35,171 INFO [M:0;3302f0f507bd:46879 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:37:35,171 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:37:35,171 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:37:35,171 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:37:35,171 INFO [M:0;3302f0f507bd:46879 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.58 KB heapSize=54.99 KB 2024-11-08T00:37:35,190 DEBUG [M:0;3302f0f507bd:46879 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9693382595ea43248964e2c1d57a78f5 is 82, key is hbase:meta,,1/info:regioninfo/1731026204240/Put/seqid=0 2024-11-08T00:37:35,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741850_1026 (size=5672) 2024-11-08T00:37:35,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741850_1026 (size=5672) 2024-11-08T00:37:35,195 INFO [M:0;3302f0f507bd:46879 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9693382595ea43248964e2c1d57a78f5 2024-11-08T00:37:35,217 DEBUG [M:0;3302f0f507bd:46879 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7bf17d9a7af4b7dbf4372f60e4c4aec is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731026204740/Put/seqid=0 2024-11-08T00:37:35,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741851_1027 (size=7822) 2024-11-08T00:37:35,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741851_1027 (size=7822) 2024-11-08T00:37:35,222 INFO [M:0;3302f0f507bd:46879 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.98 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7bf17d9a7af4b7dbf4372f60e4c4aec 2024-11-08T00:37:35,226 INFO [M:0;3302f0f507bd:46879 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b7bf17d9a7af4b7dbf4372f60e4c4aec 2024-11-08T00:37:35,235 INFO [RS:0;3302f0f507bd:38463 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:37:35,235 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:37:35,235 INFO [RS:0;3302f0f507bd:38463 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=3302f0f507bd,38463,1731026203220; zookeeper connection closed. 2024-11-08T00:37:35,235 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38463-0x10117dfad400001, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:37:35,235 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@518a8f7a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@518a8f7a 2024-11-08T00:37:35,235 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-08T00:37:35,241 DEBUG [M:0;3302f0f507bd:46879 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ff2df2f1ade645a8b148323ea7c53821 is 69, key is 3302f0f507bd,38463,1731026203220/rs:state/1731026203693/Put/seqid=0 2024-11-08T00:37:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741852_1028 (size=5156) 2024-11-08T00:37:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741852_1028 (size=5156) 2024-11-08T00:37:35,246 INFO [M:0;3302f0f507bd:46879 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ff2df2f1ade645a8b148323ea7c53821 2024-11-08T00:37:35,264 DEBUG [M:0;3302f0f507bd:46879 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a81c48ff707f4ab68eb269389d77b6b2 is 52, key is load_balancer_on/state:d/1731026204363/Put/seqid=0 2024-11-08T00:37:35,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741853_1029 (size=5056) 2024-11-08T00:37:35,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741853_1029 (size=5056) 2024-11-08T00:37:35,269 INFO [M:0;3302f0f507bd:46879 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a81c48ff707f4ab68eb269389d77b6b2 2024-11-08T00:37:35,274 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9693382595ea43248964e2c1d57a78f5 as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9693382595ea43248964e2c1d57a78f5 2024-11-08T00:37:35,278 INFO [M:0;3302f0f507bd:46879 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9693382595ea43248964e2c1d57a78f5, entries=8, sequenceid=121, filesize=5.5 K 2024-11-08T00:37:35,279 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b7bf17d9a7af4b7dbf4372f60e4c4aec as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b7bf17d9a7af4b7dbf4372f60e4c4aec 2024-11-08T00:37:35,283 INFO [M:0;3302f0f507bd:46879 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b7bf17d9a7af4b7dbf4372f60e4c4aec 2024-11-08T00:37:35,283 INFO [M:0;3302f0f507bd:46879 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b7bf17d9a7af4b7dbf4372f60e4c4aec, entries=14, sequenceid=121, filesize=7.6 K 2024-11-08T00:37:35,284 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ff2df2f1ade645a8b148323ea7c53821 as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ff2df2f1ade645a8b148323ea7c53821 2024-11-08T00:37:35,288 INFO [M:0;3302f0f507bd:46879 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ff2df2f1ade645a8b148323ea7c53821, entries=1, sequenceid=121, filesize=5.0 K 2024-11-08T00:37:35,289 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a81c48ff707f4ab68eb269389d77b6b2 as hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a81c48ff707f4ab68eb269389d77b6b2 2024-11-08T00:37:35,294 INFO [M:0;3302f0f507bd:46879 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34977/user/jenkins/test-data/71b20495-1467-5519-e967-680d6d9a6c0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a81c48ff707f4ab68eb269389d77b6b2, entries=1, sequenceid=121, filesize=4.9 K 2024-11-08T00:37:35,295 INFO [M:0;3302f0f507bd:46879 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false 2024-11-08T00:37:35,296 INFO [M:0;3302f0f507bd:46879 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
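The "Committing ... .tmp/... as ..." lines show each flushed file being promoted from the region's .tmp area into the live store directory. Conceptually that commit step is a per-file HDFS rename, which is a cheap metadata operation; a rough sketch with the stock Hadoop FileSystem API (NameNode port taken from the log, paths shortened to placeholders):

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitTmpFile {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34977"), new Configuration());
        // placeholder paths; the real ones are the long .tmp/info paths in the log above
        Path tmp = new Path("/data/region/.tmp/info/flushedfile");
        Path dst = new Path("/data/region/info/flushedfile");
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
        System.out.println("committed " + dst);
      }
    }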
2024-11-08T00:37:35,297 DEBUG [M:0;3302f0f507bd:46879 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026255171Disabling compacts and flushes for region at 1731026255171Disabling writes for close at 1731026255171Obtaining lock to block concurrent updates at 1731026255171Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731026255171Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44629, getHeapSize=56248, getOffHeapSize=0, getCellsCount=140 at 1731026255172 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731026255173 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731026255173Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731026255190 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731026255190Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731026255200 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731026255216 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731026255216Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731026255226 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731026255240 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731026255240Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731026255249 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731026255263 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731026255263Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57490544: reopening flushed file at 1731026255273 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@590eecb0: reopening flushed file at 1731026255279 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61904615: reopening flushed file at 1731026255284 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56ccba0e: reopening flushed file at 1731026255289 (+5 ms)Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=121, compaction requested=false at 1731026255295 (+6 ms)Writing region close event to WAL at 1731026255296 (+1 ms)Closed at 1731026255296 2024-11-08T00:37:35,297 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,297 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,297 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,297 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,297 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:37:35,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46139 is added to blk_1073741830_1006 (size=53026) 2024-11-08T00:37:35,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741830_1006 (size=53026) 2024-11-08T00:37:35,299 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
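After the master's local store is flushed, its WAL is closed, the files are archived to oldWALs, and the roller thread exits. Since the test exercises log rolling, it may help to note that a roll can also be requested explicitly through the Admin API; a hedged sketch, with the server name string copied from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWal {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // server name format is host,port,startcode, as printed throughout the log
          ServerName rs = ServerName.valueOf("3302f0f507bd,38463,1731026203220");
          admin.rollWALWriter(rs);  // close the current WAL on that server and start a new one
        }
      }
    }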
2024-11-08T00:37:35,299 INFO [M:0;3302f0f507bd:46879 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-08T00:37:35,300 INFO [M:0;3302f0f507bd:46879 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46879 2024-11-08T00:37:35,300 INFO [M:0;3302f0f507bd:46879 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:37:35,407 INFO [M:0;3302f0f507bd:46879 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:37:35,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:37:35,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46879-0x10117dfad400000, quorum=127.0.0.1:56642, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:37:35,410 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14b98ef8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:37:35,410 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fc2e521{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:37:35,410 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:37:35,410 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65345c29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:37:35,410 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7009eb0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.log.dir/,STOPPED} 2024-11-08T00:37:35,412 WARN [BP-1032823788-172.17.0.3-1731026200566 heartbeating to localhost/127.0.0.1:34977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:37:35,412 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
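The datanode web contexts stopping and the block-pool heartbeat threads being interrupted are the HDFS mini-cluster coming down as part of the test teardown. A minimal lifecycle sketch with MiniDFSCluster (from the hadoop-hdfs tests artifact referenced in the log), shown only to make the shutdown messages easier to place:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsLifecycle {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // two datanodes, matching the two block replicas reported throughout the log
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          System.out.println("mini DFS up at " + fs.getUri());
        } finally {
          // produces the "Ending block pool service" and Jetty "Stopped" lines seen above
          cluster.shutdown();
        }
      }
    }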
2024-11-08T00:37:35,412 WARN [BP-1032823788-172.17.0.3-1731026200566 heartbeating to localhost/127.0.0.1:34977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1032823788-172.17.0.3-1731026200566 (Datanode Uuid b59bfa08-3364-48c9-b627-1f6ae932366c) service to localhost/127.0.0.1:34977 2024-11-08T00:37:35,412 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:37:35,412 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/data/data3/current/BP-1032823788-172.17.0.3-1731026200566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:37:35,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/data/data4/current/BP-1032823788-172.17.0.3-1731026200566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:37:35,413 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:37:35,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e4582a5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:37:35,415 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37d74326{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:37:35,415 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:37:35,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e6bebf5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:37:35,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@278dab99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.log.dir/,STOPPED} 2024-11-08T00:37:35,416 WARN [BP-1032823788-172.17.0.3-1731026200566 heartbeating to localhost/127.0.0.1:34977 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:37:35,416 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
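The ResourceChecker report that follows compares thread counts before and after the test (Thread=210, was 183) and prints each potentially hanging thread with its stack. For readers who want to reproduce such a listing outside the test harness, a plain-JDK sketch of taking an equivalent snapshot:

    import java.util.Map;

    public class ThreadSnapshot {
      public static void main(String[] args) {
        // snapshot of all live threads with their current stacks, similar in spirit
        // to the ResourceChecker report below
        Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
        System.out.println("live threads: " + stacks.size());
        for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
          System.out.println("Potentially hanging thread: " + e.getKey().getName());
          for (StackTraceElement frame : e.getValue()) {
            System.out.println("    " + frame);
          }
        }
      }
    }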
2024-11-08T00:37:35,416 WARN [BP-1032823788-172.17.0.3-1731026200566 heartbeating to localhost/127.0.0.1:34977 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1032823788-172.17.0.3-1731026200566 (Datanode Uuid 41142ddb-f7bc-418d-a666-f5f8dbafdba5) service to localhost/127.0.0.1:34977 2024-11-08T00:37:35,416 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:37:35,417 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/data/data1/current/BP-1032823788-172.17.0.3-1731026200566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:37:35,417 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/cluster_47bce739-856b-0996-e8e7-6762f6a1d79e/data/data2/current/BP-1032823788-172.17.0.3-1731026200566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:37:35,417 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:37:35,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30d9f702{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:37:35,422 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d483d07{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:37:35,422 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:37:35,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@df163d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:37:35,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6da95783{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.log.dir/,STOPPED} 2024-11-08T00:37:35,428 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T00:37:35,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T00:37:35,454 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=210 (was 183) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/3302f0f507bd:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to 
localhost/127.0.0.1:34977 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34977 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending 
Thread for localhost/127.0.0.1:34977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:34977 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34977 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34977 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=154 (was 224), ProcessCount=11 (was 11), AvailableMemoryMB=6767 (was 6138) - AvailableMemoryMB LEAK? - 2024-11-08T00:37:35,460 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=210, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=154, ProcessCount=11, AvailableMemoryMB=6767 2024-11-08T00:37:35,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.log.dir so I do NOT create it in target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a090713a-d661-5af9-c70e-cd83f95f0d1a/hadoop.tmp.dir so I do NOT create it in target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6, deleteOnExit=true 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/test.cache.data in system properties and HBase conf 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.log.dir in system properties and HBase conf 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T00:37:35,461 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T00:37:35,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/nfs.dump.dir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/java.io.tmpdir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T00:37:35,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T00:37:35,475 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:37:35,719 INFO [regionserver/3302f0f507bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:37:35,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:35,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:35,803 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:37:35,807 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:37:35,808 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:37:35,808 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:37:35,808 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:37:35,809 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:37:35,809 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@314e7370{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:37:35,809 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@425d5d71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:37:35,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ff7780b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/java.io.tmpdir/jetty-localhost-34297-hadoop-hdfs-3_4_1-tests_jar-_-any-12868423717359057931/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:37:35,902 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20d96a0d{HTTP/1.1, (http/1.1)}{localhost:34297} 2024-11-08T00:37:35,902 INFO [Time-limited test {}] server.Server(415): Started @249999ms 2024-11-08T00:37:35,914 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:37:36,307 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:37:36,311 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:37:36,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:37:36,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:37:36,312 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:37:36,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2eb912ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:37:36,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f424370{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:37:36,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a6e8e46{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/java.io.tmpdir/jetty-localhost-38583-hadoop-hdfs-3_4_1-tests_jar-_-any-120692743004912428/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:37:36,406 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1204fb24{HTTP/1.1, (http/1.1)}{localhost:38583} 2024-11-08T00:37:36,406 INFO [Time-limited test {}] server.Server(415): Started @250504ms 2024-11-08T00:37:36,408 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:37:36,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:37:36,434 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:37:36,437 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:37:36,437 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:37:36,437 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:37:36,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13ef5561{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:37:36,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f61588{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:37:36,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@42c5c09{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/java.io.tmpdir/jetty-localhost-33077-hadoop-hdfs-3_4_1-tests_jar-_-any-18237691022860589560/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:37:36,529 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ce0a24{HTTP/1.1, (http/1.1)}{localhost:33077} 2024-11-08T00:37:36,529 INFO [Time-limited test {}] server.Server(415): Started @250626ms 2024-11-08T00:37:36,530 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:37:36,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:36,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:37,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:37,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:37,808 WARN [Thread-1962 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/data/data2/current/BP-414991167-172.17.0.3-1731026255478/current, will proceed with Du for space computation calculation, 2024-11-08T00:37:37,808 WARN [Thread-1961 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/data/data1/current/BP-414991167-172.17.0.3-1731026255478/current, will proceed with Du for space computation calculation, 2024-11-08T00:37:37,826 WARN [Thread-1925 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:37:37,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9cfdee9f4a6a4bd6 with lease ID 0x4f378637889193ed: Processing first storage report for DS-b7fab7b9-6357-410f-9969-4025cfb6cc91 from datanode DatanodeRegistration(127.0.0.1:33277, datanodeUuid=4bcf6621-59ec-4f2b-9b89-673e26f6a24c, infoPort=34681, infoSecurePort=0, ipcPort=46535, storageInfo=lv=-57;cid=testClusterID;nsid=962727284;c=1731026255478) 2024-11-08T00:37:37,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9cfdee9f4a6a4bd6 with lease ID 0x4f378637889193ed: from storage DS-b7fab7b9-6357-410f-9969-4025cfb6cc91 node DatanodeRegistration(127.0.0.1:33277, datanodeUuid=4bcf6621-59ec-4f2b-9b89-673e26f6a24c, infoPort=34681, infoSecurePort=0, ipcPort=46535, storageInfo=lv=-57;cid=testClusterID;nsid=962727284;c=1731026255478), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:37:37,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9cfdee9f4a6a4bd6 with lease ID 0x4f378637889193ed: Processing first storage report for DS-3ec90533-815a-4d84-9f87-5d7931e6bfa1 from datanode DatanodeRegistration(127.0.0.1:33277, datanodeUuid=4bcf6621-59ec-4f2b-9b89-673e26f6a24c, infoPort=34681, infoSecurePort=0, ipcPort=46535, storageInfo=lv=-57;cid=testClusterID;nsid=962727284;c=1731026255478) 2024-11-08T00:37:37,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9cfdee9f4a6a4bd6 with lease ID 0x4f378637889193ed: from storage DS-3ec90533-815a-4d84-9f87-5d7931e6bfa1 node DatanodeRegistration(127.0.0.1:33277, datanodeUuid=4bcf6621-59ec-4f2b-9b89-673e26f6a24c, infoPort=34681, infoSecurePort=0, ipcPort=46535, storageInfo=lv=-57;cid=testClusterID;nsid=962727284;c=1731026255478), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:37:37,925 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/data/data3/current/BP-414991167-172.17.0.3-1731026255478/current, will proceed with Du for space computation calculation, 2024-11-08T00:37:37,926 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/data/data4/current/BP-414991167-172.17.0.3-1731026255478/current, will proceed with Du for space computation calculation, 2024-11-08T00:37:37,945 WARN [Thread-1948 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T00:37:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x56a2e168eddd72b with lease ID 0x4f378637889193ee: Processing first storage report for DS-8c91a542-c084-4e92-8942-8aa0cc99f304 from datanode DatanodeRegistration(127.0.0.1:43605, datanodeUuid=bf7e85fc-51ed-4bc8-98cd-c3e459598d95, infoPort=33457, infoSecurePort=0, ipcPort=32877, storageInfo=lv=-57;cid=testClusterID;nsid=962727284;c=1731026255478) 2024-11-08T00:37:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56a2e168eddd72b with lease ID 0x4f378637889193ee: from storage DS-8c91a542-c084-4e92-8942-8aa0cc99f304 node DatanodeRegistration(127.0.0.1:43605, datanodeUuid=bf7e85fc-51ed-4bc8-98cd-c3e459598d95, infoPort=33457, infoSecurePort=0, ipcPort=32877, storageInfo=lv=-57;cid=testClusterID;nsid=962727284;c=1731026255478), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:37:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x56a2e168eddd72b with lease ID 0x4f378637889193ee: Processing first storage report for DS-bd1946a5-1178-46fd-853e-63afbce80c77 from datanode DatanodeRegistration(127.0.0.1:43605, datanodeUuid=bf7e85fc-51ed-4bc8-98cd-c3e459598d95, infoPort=33457, infoSecurePort=0, ipcPort=32877, storageInfo=lv=-57;cid=testClusterID;nsid=962727284;c=1731026255478) 2024-11-08T00:37:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56a2e168eddd72b with lease ID 0x4f378637889193ee: from storage DS-bd1946a5-1178-46fd-853e-63afbce80c77 node DatanodeRegistration(127.0.0.1:43605, datanodeUuid=bf7e85fc-51ed-4bc8-98cd-c3e459598d95, infoPort=33457, infoSecurePort=0, ipcPort=32877, storageInfo=lv=-57;cid=testClusterID;nsid=962727284;c=1731026255478), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:37:37,965 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818 2024-11-08T00:37:37,969 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/zookeeper_0, clientPort=51204, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T00:37:37,970 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51204 2024-11-08T00:37:37,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:37:37,973 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:37:37,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:37:37,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:37:37,982 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797 with version=8 2024-11-08T00:37:37,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase-staging 2024-11-08T00:37:37,983 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:37:37,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:37:37,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:37:37,984 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:37:37,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:37:37,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:37:37,984 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T00:37:37,984 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:37:37,984 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46443 2024-11-08T00:37:37,986 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46443 connecting to ZooKeeper ensemble=127.0.0.1:51204 2024-11-08T00:37:38,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:464430x0, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:37:38,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46443-0x10117e083db0000 connected 2024-11-08T00:37:38,139 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:37:38,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:37:38,148 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:37:38,148 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797, hbase.cluster.distributed=false 2024-11-08T00:37:38,152 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:37:38,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46443 2024-11-08T00:37:38,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46443 2024-11-08T00:37:38,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46443 2024-11-08T00:37:38,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46443 2024-11-08T00:37:38,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46443 2024-11-08T00:37:38,169 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:37:38,170 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:37:38,170 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:37:38,170 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:37:38,170 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:37:38,170 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:37:38,170 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:37:38,170 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:37:38,170 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37459 2024-11-08T00:37:38,172 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37459 connecting to ZooKeeper ensemble=127.0.0.1:51204 2024-11-08T00:37:38,172 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:37:38,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:37:38,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:374590x0, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:37:38,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37459-0x10117e083db0001 connected 2024-11-08T00:37:38,188 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:37:38,188 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:37:38,188 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:37:38,189 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:37:38,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:37:38,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37459 2024-11-08T00:37:38,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37459 2024-11-08T00:37:38,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37459 2024-11-08T00:37:38,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37459 2024-11-08T00:37:38,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37459 2024-11-08T00:37:38,208 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3302f0f507bd:46443 2024-11-08T00:37:38,208 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3302f0f507bd,46443,1731026257983 2024-11-08T00:37:38,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:37:38,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:37:38,219 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3302f0f507bd,46443,1731026257983 2024-11-08T00:37:38,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:37:38,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,230 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:37:38,231 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3302f0f507bd,46443,1731026257983 from backup master directory 2024-11-08T00:37:38,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:37:38,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3302f0f507bd,46443,1731026257983 2024-11-08T00:37:38,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:37:38,240 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
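The records above trace the master-election handshake in ZooKeeper: the master registers under /hbase/backup-masters, sets a watcher on /hbase/master, and then deletes its own backup znode once it becomes active, all against the ensemble at 127.0.0.1:51204 with baseZNode=/hbase. As a minimal illustrative sketch (not part of the test output), those same znodes can be inspected with the stock Apache ZooKeeper client; the connect string and paths are taken from the log, while the class name, session timeout, and printed output are assumptions added for illustration.

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Illustrative sketch only: look at the master-election znodes mentioned in the log above.
// Connect string and znode paths come from the log; the 30s session timeout is an assumed value.
public class InspectHBaseZNodes {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51204", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // /hbase/master holds the active master; /hbase/backup-masters lists any standbys.
    Stat masterStat = zk.exists("/hbase/master", false);
    System.out.println("/hbase/master exists: " + (masterStat != null));

    // After the promotion seen in the log, this list is simply empty.
    List<String> backups = zk.getChildren("/hbase/backup-masters", false);
    System.out.println("backup masters: " + backups);

    zk.close();
  }
}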
2024-11-08T00:37:38,240 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3302f0f507bd,46443,1731026257983 2024-11-08T00:37:38,247 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/hbase.id] with ID: 3e0521a6-3520-463b-92be-3f022045c549 2024-11-08T00:37:38,247 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/.tmp/hbase.id 2024-11-08T00:37:38,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:37:38,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:37:38,256 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/.tmp/hbase.id]:[hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/hbase.id] 2024-11-08T00:37:38,267 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:37:38,267 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T00:37:38,268 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
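The FSUtils lines above write the cluster ID to a temporary location and then move it to its final path. A hedged sketch of that write-then-rename step with the plain Hadoop FileSystem API follows; the namenode address and cluster ID are taken from the log, while the shortened paths and the helper class itself are illustrative, not HBase's FSUtils.

    // Sketch of the write-temporary-then-rename pattern; readers only ever see a complete file.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:34233");          // namenode from the log
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // shortened example paths
        Path dst = new Path("/user/jenkins/test-data/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("3e0521a6-3520-463b-92be-3f022045c549".getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {                                  // near-atomic publish step
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }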
2024-11-08T00:37:38,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:37:38,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:37:38,281 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:37:38,282 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T00:37:38,282 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:37:38,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:37:38,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:37:38,290 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store 2024-11-08T00:37:38,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:37:38,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:37:38,298 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:37:38,299 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:37:38,299 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:37:38,299 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:37:38,299 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:37:38,299 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:37:38,299 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
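The MasterRegion entry above spells out the 'master:store' descriptor with its info/proc/rs/state families. As a rough illustration only (MasterRegion assembles this descriptor internally), the same kind of column-family layout can be expressed with the public hbase-client builders; the sketch below covers just the 'info' and 'proc' families, with settings copied from the log.

    // Illustrative descriptor built with the public client API, not HBase's internal code path.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                   // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                   // BLOOMFILTER => 'ROW'
                .build())
            .build();
      }
    }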
2024-11-08T00:37:38,299 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026258299Disabling compacts and flushes for region at 1731026258299Disabling writes for close at 1731026258299Writing region close event to WAL at 1731026258299Closed at 1731026258299 2024-11-08T00:37:38,299 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/.initializing 2024-11-08T00:37:38,299 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/WALs/3302f0f507bd,46443,1731026257983 2024-11-08T00:37:38,302 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C46443%2C1731026257983, suffix=, logDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/WALs/3302f0f507bd,46443,1731026257983, archiveDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/oldWALs, maxLogs=10 2024-11-08T00:37:38,302 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C46443%2C1731026257983.1731026258302 2024-11-08T00:37:38,306 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/WALs/3302f0f507bd,46443,1731026257983/3302f0f507bd%2C46443%2C1731026257983.1731026258302 2024-11-08T00:37:38,307 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33457:33457),(127.0.0.1/127.0.0.1:34681:34681)] 2024-11-08T00:37:38,307 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:37:38,307 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:37:38,307 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,307 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,312 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,313 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T00:37:38,313 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:38,314 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T00:37:38,315 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:37:38,315 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T00:37:38,316 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,316 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:37:38,317 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,318 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T00:37:38,318 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,318 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:37:38,318 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,319 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,319 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,320 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,320 DEBUG [master/3302f0f507bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,320 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:37:38,321 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:37:38,323 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:37:38,324 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786357, jitterRate=-9.573996067047119E-5}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:37:38,324 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731026258308Initializing all the Stores at 1731026258308Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026258308Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026258312 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026258312Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026258312Cleaning up temporary data from old regions at 1731026258320 (+8 ms)Region opened successfully at 1731026258324 (+4 ms) 2024-11-08T00:37:38,325 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T00:37:38,327 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ae93d4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:37:38,328 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T00:37:38,328 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T00:37:38,328 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T00:37:38,328 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T00:37:38,329 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-08T00:37:38,329 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T00:37:38,329 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T00:37:38,331 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T00:37:38,331 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T00:37:38,338 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T00:37:38,339 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T00:37:38,339 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T00:37:38,349 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T00:37:38,349 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T00:37:38,350 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T00:37:38,360 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T00:37:38,361 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T00:37:38,370 DEBUG 
[master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T00:37:38,372 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T00:37:38,381 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T00:37:38,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:37:38,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:37:38,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,392 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3302f0f507bd,46443,1731026257983, sessionid=0x10117e083db0000, setting cluster-up flag (Was=false) 2024-11-08T00:37:38,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,444 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T00:37:38,445 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,46443,1731026257983 2024-11-08T00:37:38,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:38,496 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T00:37:38,498 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,46443,1731026257983 2024-11-08T00:37:38,499 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T00:37:38,500 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T00:37:38,500 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T00:37:38,500 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-08T00:37:38,501 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3302f0f507bd,46443,1731026257983 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T00:37:38,502 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:37:38,502 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:37:38,502 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:37:38,502 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:37:38,502 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3302f0f507bd:0, corePoolSize=10, maxPoolSize=10 2024-11-08T00:37:38,502 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,502 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:37:38,503 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-08T00:37:38,504 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731026288504 2024-11-08T00:37:38,504 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T00:37:38,504 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T00:37:38,504 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:37:38,504 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T00:37:38,504 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T00:37:38,504 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T00:37:38,504 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T00:37:38,504 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T00:37:38,505 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
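The ChoreService/CleanerChore entries above register periodic cleaners such as LogsCleaner with a fixed period (600000 ms). HBase runs these through its own ScheduledChore machinery; the plain-JDK sketch below only illustrates the same fixed-period scheduling pattern, with a placeholder task body.

    // JDK analogue of a fixed-period chore; HBase itself uses ChoreService/ScheduledChore.
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChorePatternSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
        Runnable logsCleaner = () -> System.out.println("scan oldWALs and delete expired files");
        // period taken from the log line: period=600000, unit=MILLISECONDS
        chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
      }
    }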
2024-11-08T00:37:38,505 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T00:37:38,505 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T00:37:38,505 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T00:37:38,505 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T00:37:38,505 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T00:37:38,506 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,506 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026258505,5,FailOnTimeoutGroup] 2024-11-08T00:37:38,506 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026258506,5,FailOnTimeoutGroup] 2024-11-08T00:37:38,506 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T00:37:38,506 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,506 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T00:37:38,506 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,506 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:37:38,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:37:38,514 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T00:37:38,514 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797 2024-11-08T00:37:38,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:37:38,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:37:38,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:37:38,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:37:38,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:37:38,523 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:38,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:37:38,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:37:38,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:38,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:37:38,527 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:37:38,527 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:38,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:37:38,528 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:37:38,528 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:38,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:38,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:37:38,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740 2024-11-08T00:37:38,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740 2024-11-08T00:37:38,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:37:38,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:37:38,532 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-08T00:37:38,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:37:38,536 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:37:38,537 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699861, jitterRate=-0.11008110642433167}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:37:38,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731026258520Initializing all the Stores at 1731026258521 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026258521Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026258521Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026258521Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026258521Cleaning up temporary data from old regions at 1731026258532 (+11 ms)Region opened successfully at 1731026258537 (+5 ms) 2024-11-08T00:37:38,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:37:38,538 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:37:38,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:37:38,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:37:38,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:37:38,538 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:37:38,538 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026258538Disabling compacts and flushes for region at 
1731026258538Disabling writes for close at 1731026258538Writing region close event to WAL at 1731026258538Closed at 1731026258538 2024-11-08T00:37:38,540 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:37:38,540 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T00:37:38,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T00:37:38,541 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:37:38,543 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T00:37:38,597 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(746): ClusterId : 3e0521a6-3520-463b-92be-3f022045c549 2024-11-08T00:37:38,597 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:37:38,609 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:37:38,609 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:37:38,620 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:37:38,620 DEBUG [RS:0;3302f0f507bd:37459 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23e2ff36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:37:38,636 DEBUG [RS:0;3302f0f507bd:37459 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3302f0f507bd:37459 2024-11-08T00:37:38,636 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:37:38,636 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:37:38,636 DEBUG [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(832): About to register with Master. 
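At this point the region server has picked up the ClusterId and is about to register with the master. For orientation, a client reaching this same test cluster would point at the ZooKeeper quorum shown throughout the log (127.0.0.1:51204); the sketch below assumes hbase-client on the classpath, and in the actual test the mini-cluster utility hands out this Configuration directly rather than it being built by hand.

    // Client-side view of the cluster being started in this log; values copied from the log lines.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");             // from quorum=127.0.0.1:51204
        conf.set("hbase.zookeeper.property.clientPort", "51204");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }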
2024-11-08T00:37:38,637 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,46443,1731026257983 with port=37459, startcode=1731026258169 2024-11-08T00:37:38,637 DEBUG [RS:0;3302f0f507bd:37459 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:37:38,639 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53457, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:37:38,639 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46443 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3302f0f507bd,37459,1731026258169 2024-11-08T00:37:38,639 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46443 {}] master.ServerManager(517): Registering regionserver=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:38,641 DEBUG [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797 2024-11-08T00:37:38,641 DEBUG [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34233 2024-11-08T00:37:38,641 DEBUG [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:37:38,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:37:38,651 DEBUG [RS:0;3302f0f507bd:37459 {}] zookeeper.ZKUtil(111): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3302f0f507bd,37459,1731026258169 2024-11-08T00:37:38,651 WARN [RS:0;3302f0f507bd:37459 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:37:38,651 INFO [RS:0;3302f0f507bd:37459 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:37:38,651 DEBUG [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169 2024-11-08T00:37:38,651 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3302f0f507bd,37459,1731026258169] 2024-11-08T00:37:38,654 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:37:38,655 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:37:38,655 INFO [RS:0;3302f0f507bd:37459 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:37:38,655 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
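The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, and the BlockCache allocated earlier in this log is also 880 MB. Those numbers are consistent with HBase's default sizing fractions if the test JVM heap is roughly 2200 MB; the heap size and the 0.4/0.95 fractions in the sketch below are assumptions, not values printed in the log.

    // Worked arithmetic under assumed defaults; nothing here is read from the running cluster.
    public class MemstoreSizingSketch {
      public static void main(String[] args) {
        long heapMb = 2200;                      // assumed test JVM heap, not logged directly
        double globalMemstoreFraction = 0.4;     // assumed default global memstore fraction
        double lowerLimitFraction = 0.95;        // assumed fraction of the limit used as low mark
        long globalLimitMb = (long) (heapMb * globalMemstoreFraction);   // ~880, matches the log
        long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);    // ~836, matches the log
        System.out.println("globalMemStoreLimit=" + globalLimitMb + "M lowMark=" + lowMarkMb + "M");
      }
    }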
2024-11-08T00:37:38,656 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:37:38,656 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:37:38,656 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,656 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,656 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:37:38,657 DEBUG [RS:0;3302f0f507bd:37459 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:37:38,657 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-08T00:37:38,658 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,658 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,658 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,658 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,658 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,37459,1731026258169-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:37:38,673 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:37:38,673 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,37459,1731026258169-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,673 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,673 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.Replication(171): 3302f0f507bd,37459,1731026258169 started 2024-11-08T00:37:38,685 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:38,685 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1482): Serving as 3302f0f507bd,37459,1731026258169, RpcServer on 3302f0f507bd/172.17.0.3:37459, sessionid=0x10117e083db0001 2024-11-08T00:37:38,685 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:37:38,685 DEBUG [RS:0;3302f0f507bd:37459 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3302f0f507bd,37459,1731026258169 2024-11-08T00:37:38,685 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,37459,1731026258169' 2024-11-08T00:37:38,685 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:37:38,686 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:37:38,686 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:37:38,686 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:37:38,686 DEBUG [RS:0;3302f0f507bd:37459 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3302f0f507bd,37459,1731026258169 2024-11-08T00:37:38,686 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,37459,1731026258169' 2024-11-08T00:37:38,686 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:37:38,687 DEBUG 
[RS:0;3302f0f507bd:37459 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:37:38,687 DEBUG [RS:0;3302f0f507bd:37459 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:37:38,687 INFO [RS:0;3302f0f507bd:37459 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T00:37:38,687 INFO [RS:0;3302f0f507bd:37459 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T00:37:38,693 WARN [3302f0f507bd:46443 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T00:37:38,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:38,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:38,789 INFO [RS:0;3302f0f507bd:37459 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C37459%2C1731026258169, suffix=, logDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169, archiveDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/oldWALs, maxLogs=32 2024-11-08T00:37:38,789 INFO [RS:0;3302f0f507bd:37459 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37459%2C1731026258169.1731026258789 2024-11-08T00:37:38,797 INFO [RS:0;3302f0f507bd:37459 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.1731026258789 2024-11-08T00:37:38,799 DEBUG [RS:0;3302f0f507bd:37459 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33457:33457),(127.0.0.1/127.0.0.1:34681:34681)] 2024-11-08T00:37:38,944 DEBUG [3302f0f507bd:46443 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-08T00:37:38,945 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:38,949 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,37459,1731026258169, state=OPENING 2024-11-08T00:37:38,960 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T00:37:39,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:39,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:37:39,053 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:37:39,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:37:39,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,37459,1731026258169}] 2024-11-08T00:37:39,053 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:37:39,211 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:37:39,215 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60987, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:37:39,221 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T00:37:39,221 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:37:39,223 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C37459%2C1731026258169.meta, suffix=.meta, logDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169, archiveDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/oldWALs, maxLogs=32 2024-11-08T00:37:39,223 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37459%2C1731026258169.meta.1731026259223.meta 2024-11-08T00:37:39,228 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.meta.1731026259223.meta 2024-11-08T00:37:39,229 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34681:34681),(127.0.0.1/127.0.0.1:33457:33457)] 2024-11-08T00:37:39,230 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 
2024-11-08T00:37:39,230 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T00:37:39,230 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T00:37:39,230 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-08T00:37:39,230 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T00:37:39,230 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:37:39,230 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T00:37:39,230 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T00:37:39,231 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:37:39,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:37:39,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:39,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:39,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:37:39,233 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:37:39,233 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:39,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:39,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:37:39,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:37:39,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:39,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:39,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:37:39,235 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:37:39,235 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:39,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:37:39,236 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:37:39,236 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740 2024-11-08T00:37:39,237 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740 2024-11-08T00:37:39,238 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:37:39,238 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:37:39,239 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-08T00:37:39,240 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:37:39,240 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689885, jitterRate=-0.12276704609394073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:37:39,240 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T00:37:39,241 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731026259230Writing region info on filesystem at 1731026259230Initializing all the Stores at 1731026259231 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026259231Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026259231Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026259231Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026259231Cleaning up temporary data from old regions at 1731026259238 (+7 ms)Running coprocessor post-open hooks at 1731026259241 (+3 ms)Region opened successfully at 1731026259241 2024-11-08T00:37:39,242 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731026259211 2024-11-08T00:37:39,244 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T00:37:39,244 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T00:37:39,245 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:39,246 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,37459,1731026258169, state=OPEN 2024-11-08T00:37:39,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:37:39,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:37:39,280 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:39,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:37:39,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:37:39,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T00:37:39,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,37459,1731026258169 in 227 msec 2024-11-08T00:37:39,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T00:37:39,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 744 msec 2024-11-08T00:37:39,288 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:37:39,288 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T00:37:39,290 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:37:39,290 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,37459,1731026258169, seqNum=-1] 2024-11-08T00:37:39,291 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:37:39,292 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45843, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:37:39,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 797 msec 2024-11-08T00:37:39,298 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731026259298, completionTime=-1 2024-11-08T00:37:39,298 INFO 
[master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-08T00:37:39,298 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T00:37:39,300 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-08T00:37:39,300 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731026319300 2024-11-08T00:37:39,300 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731026379300 2024-11-08T00:37:39,300 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-08T00:37:39,301 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46443,1731026257983-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:39,301 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46443,1731026257983-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:39,301 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46443,1731026257983-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:39,301 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3302f0f507bd:46443, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:39,301 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:39,301 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:39,303 DEBUG [master/3302f0f507bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T00:37:39,305 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.065sec 2024-11-08T00:37:39,305 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T00:37:39,305 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T00:37:39,305 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T00:37:39,305 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-08T00:37:39,305 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T00:37:39,305 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46443,1731026257983-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:37:39,306 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46443,1731026257983-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T00:37:39,308 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T00:37:39,308 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T00:37:39,308 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,46443,1731026257983-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:37:39,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68949c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:37:39,397 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3302f0f507bd,46443,-1 for getting cluster id 2024-11-08T00:37:39,397 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T00:37:39,400 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3e0521a6-3520-463b-92be-3f022045c549' 2024-11-08T00:37:39,400 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T00:37:39,401 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3e0521a6-3520-463b-92be-3f022045c549" 2024-11-08T00:37:39,401 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74247222, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:37:39,401 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3302f0f507bd,46443,-1] 2024-11-08T00:37:39,402 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T00:37:39,402 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:37:39,404 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45216, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T00:37:39,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1bfa18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:37:39,406 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:37:39,407 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,37459,1731026258169, seqNum=-1] 2024-11-08T00:37:39,408 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:37:39,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58702, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:37:39,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3302f0f507bd,46443,1731026257983 2024-11-08T00:37:39,411 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:37:39,414 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-08T00:37:39,414 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T00:37:39,415 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 3302f0f507bd,46443,1731026257983 2024-11-08T00:37:39,415 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2933b4d7 2024-11-08T00:37:39,416 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T00:37:39,417 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45222, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T00:37:39,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46443 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-08T00:37:39,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46443 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-08T00:37:39,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46443 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:37:39,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46443 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-08T00:37:39,421 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T00:37:39,421 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:39,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46443 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-08T00:37:39,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:37:39,422 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T00:37:39,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741835_1011 (size=381) 2024-11-08T00:37:39,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741835_1011 (size=381) 2024-11-08T00:37:39,432 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5e8733a307b5de0ca2decb5cd91d5420, NAME => 'TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797 2024-11-08T00:37:39,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741836_1012 (size=64) 2024-11-08T00:37:39,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741836_1012 (size=64) 2024-11-08T00:37:39,437 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:37:39,437 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 5e8733a307b5de0ca2decb5cd91d5420, disabling compactions & flushes 2024-11-08T00:37:39,437 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:39,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:39,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. after waiting 0 ms 2024-11-08T00:37:39,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:39,438 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:39,438 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5e8733a307b5de0ca2decb5cd91d5420: Waiting for close lock at 1731026259437Disabling compacts and flushes for region at 1731026259437Disabling writes for close at 1731026259438 (+1 ms)Writing region close event to WAL at 1731026259438Closed at 1731026259438 2024-11-08T00:37:39,439 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T00:37:39,439 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731026259439"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026259439"}]},"ts":"1731026259439"} 2024-11-08T00:37:39,442 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-08T00:37:39,443 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T00:37:39,443 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026259443"}]},"ts":"1731026259443"} 2024-11-08T00:37:39,446 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-08T00:37:39,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, ASSIGN}] 2024-11-08T00:37:39,447 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, ASSIGN 2024-11-08T00:37:39,448 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, ASSIGN; state=OFFLINE, location=3302f0f507bd,37459,1731026258169; forceNewPlan=false, retain=false 2024-11-08T00:37:39,600 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5e8733a307b5de0ca2decb5cd91d5420, regionState=OPENING, regionLocation=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:39,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, ASSIGN because future has completed 2024-11-08T00:37:39,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e8733a307b5de0ca2decb5cd91d5420, server=3302f0f507bd,37459,1731026258169}] 2024-11-08T00:37:39,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:39,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-08T00:37:39,770 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.
2024-11-08T00:37:39,770 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5e8733a307b5de0ca2decb5cd91d5420, NAME => 'TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.', STARTKEY => '', ENDKEY => ''}
2024-11-08T00:37:39,770 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,771 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-08T00:37:39,771 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,771 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,773 INFO [StoreOpener-5e8733a307b5de0ca2decb5cd91d5420-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,774 INFO [StoreOpener-5e8733a307b5de0ca2decb5cd91d5420-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e8733a307b5de0ca2decb5cd91d5420 columnFamilyName info
2024-11-08T00:37:39,775 DEBUG [StoreOpener-5e8733a307b5de0ca2decb5cd91d5420-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-08T00:37:39,775 INFO [StoreOpener-5e8733a307b5de0ca2decb5cd91d5420-1 {}] regionserver.HStore(327): Store=5e8733a307b5de0ca2decb5cd91d5420/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-08T00:37:39,775 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,776 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,777 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,777 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,777 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,780 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,783 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-08T00:37:39,783 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5e8733a307b5de0ca2decb5cd91d5420; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822039, jitterRate=0.04527755081653595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-08T00:37:39,783 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5e8733a307b5de0ca2decb5cd91d5420
2024-11-08T00:37:39,784 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5e8733a307b5de0ca2decb5cd91d5420: Running coprocessor pre-open hook at 1731026259771Writing region info on filesystem at 1731026259771Initializing all the Stores at 1731026259772 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026259772Cleaning up temporary data from old regions at 1731026259777 (+5 ms)Running coprocessor post-open hooks at 1731026259783 (+6 ms)Region opened successfully at 1731026259784 (+1 ms)
2024-11-08T00:37:39,785 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., pid=6, masterSystemTime=1731026259763
2024-11-08T00:37:39,787 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.
2024-11-08T00:37:39,787 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.
2024-11-08T00:37:39,788 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5e8733a307b5de0ca2decb5cd91d5420, regionState=OPEN, openSeqNum=2, regionLocation=3302f0f507bd,37459,1731026258169
2024-11-08T00:37:39,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e8733a307b5de0ca2decb5cd91d5420, server=3302f0f507bd,37459,1731026258169 because future has completed
2024-11-08T00:37:39,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-08T00:37:39,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5e8733a307b5de0ca2decb5cd91d5420, server=3302f0f507bd,37459,1731026258169 in 183 msec
2024-11-08T00:37:39,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-08T00:37:39,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, ASSIGN in 347 msec
2024-11-08T00:37:39,796 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-08T00:37:39,796 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731026259796"}]},"ts":"1731026259796"}
2024-11-08T00:37:39,798 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
2024-11-08T00:37:39,799 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-08T00:37:39,801 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 381 msec
2024-11-08T00:37:39,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:39,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,456 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:37:40,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,458 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,485 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,486 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,493 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:40,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:40,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:41,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:41,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:41,966 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-08T00:37:41,967 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-08T00:37:41,968 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-08T00:37:42,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:42,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:43,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:43,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:44,654 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-08T00:37:44,655 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-08T00:37:44,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:44,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:45,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:45,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:46,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:46,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:47,473 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:37:47,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:47,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:47,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:48,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:48,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:49,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46443 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T00:37:49,456 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-08T00:37:49,457 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-08T00:37:49,463 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-08T00:37:49,463 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 
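The "Failed invocation for hdfs://..." WARNs above (and their many repeats below, roughly once a second by the timestamps) all have the same shape: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed through reflection, the probe hits a DFSClient that has already been shut down, and the resulting java.io.IOException: Filesystem closed comes back wrapped in InvocationTargetException from Method.invoke. A minimal sketch of that pattern, in the spirit of the stack traces above rather than the actual HBase code (class and method names other than isFileClosed are invented):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  // Ask the FileSystem whether the NameNode already considers the file closed.
  // Returns false when no answer can be obtained, e.g. because the DFSClient
  // behind the FileSystem has itself been closed.
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem but not on the
      // generic FileSystem API, hence the reflective lookup.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS filesystem, nothing to probe
    } catch (IllegalAccessException | InvocationTargetException e) {
      // An IOException("Filesystem closed") thrown inside DFSClient.checkOpen()
      // surfaces here wrapped in InvocationTargetException -- the exception the
      // "Failed invocation for hdfs://..." WARNs above keep recording.
      return false;
    }
  }
}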
2024-11-08T00:37:49,467 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., hostname=3302f0f507bd,37459,1731026258169, seqNum=2] 2024-11-08T00:37:49,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:49,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:37:49,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/91783a98729e48899349376653ebd3c3 is 1080, key is row0001/info:/1731026269468/Put/seqid=0 2024-11-08T00:37:49,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741837_1013 (size=12509) 2024-11-08T00:37:49,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741837_1013 (size=12509) 2024-11-08T00:37:49,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/91783a98729e48899349376653ebd3c3 2024-11-08T00:37:49,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/91783a98729e48899349376653ebd3c3 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/91783a98729e48899349376653ebd3c3 2024-11-08T00:37:49,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/91783a98729e48899349376653ebd3c3, entries=7, sequenceid=11, filesize=12.2 K 2024-11-08T00:37:49,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 5e8733a307b5de0ca2decb5cd91d5420 in 39ms, sequenceid=11, compaction requested=false 2024-11-08T00:37:49,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:49,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:49,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-08T00:37:49,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/67885496c162474f8d33bbb449cd82fd is 1080, key is row0008/info:/1731026269483/Put/seqid=0 2024-11-08T00:37:49,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741838_1014 (size=26530) 2024-11-08T00:37:49,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741838_1014 (size=26530) 2024-11-08T00:37:49,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/67885496c162474f8d33bbb449cd82fd 2024-11-08T00:37:49,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/67885496c162474f8d33bbb449cd82fd as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd 2024-11-08T00:37:49,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd, entries=20, sequenceid=34, filesize=25.9 K 2024-11-08T00:37:49,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for 5e8733a307b5de0ca2decb5cd91d5420 in 19ms, sequenceid=34, compaction requested=false 2024-11-08T00:37:49,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:49,541 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-11-08T00:37:49,541 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:49,541 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd because midkey is the same as first or last row 2024-11-08T00:37:49,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:49,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:50,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:50,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:51,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:51,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:37:51,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/bddb79b8d03245358c7712da459a28c6 is 1080, key is row0028/info:/1731026269523/Put/seqid=0 2024-11-08T00:37:51,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741839_1015 (size=12509) 2024-11-08T00:37:51,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741839_1015 (size=12509) 2024-11-08T00:37:51,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/bddb79b8d03245358c7712da459a28c6 2024-11-08T00:37:51,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/bddb79b8d03245358c7712da459a28c6 as 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/bddb79b8d03245358c7712da459a28c6 2024-11-08T00:37:51,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/bddb79b8d03245358c7712da459a28c6, entries=7, sequenceid=44, filesize=12.2 K 2024-11-08T00:37:51,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 5e8733a307b5de0ca2decb5cd91d5420 in 23ms, sequenceid=44, compaction requested=true 2024-11-08T00:37:51,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:51,566 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-11-08T00:37:51,566 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:51,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:51,566 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd because midkey is the same as first or last row 2024-11-08T00:37:51,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e8733a307b5de0ca2decb5cd91d5420:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:37:51,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:51,567 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:37:51,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-08T00:37:51,568 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:37:51,568 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 5e8733a307b5de0ca2decb5cd91d5420/info is initiating minor compaction (all files) 2024-11-08T00:37:51,568 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e8733a307b5de0ca2decb5cd91d5420/info in TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 
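As a quick arithmetic cross-check of the selection just logged (illustrative only; the numbers are copied from the surrounding lines): the three flushed store files were reported with size=12509, size=26530 and size=12509 in the addStoredBlock lines above, which is exactly the "3 files of size 51548" (about 50.3 K) that the exploring compaction policy picked up.

public class CompactionInputSize {
  public static void main(String[] args) {
    // The (size=...) values reported by the addStoredBlock lines above for the
    // three flushed store files of region 5e8733a307b5de0ca2decb5cd91d5420.
    long[] selected = {12_509L, 26_530L, 12_509L};
    long total = 0;
    for (long size : selected) {
      total += size;
    }
    // Prints: selected 3 files, total=51548 bytes (50.3 K)
    System.out.printf("selected %d files, total=%d bytes (%.1f K)%n",
        selected.length, total, total / 1024.0);
  }
}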
2024-11-08T00:37:51,568 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/91783a98729e48899349376653ebd3c3, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/bddb79b8d03245358c7712da459a28c6] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp, totalSize=50.3 K 2024-11-08T00:37:51,569 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91783a98729e48899349376653ebd3c3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731026269468 2024-11-08T00:37:51,569 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 67885496c162474f8d33bbb449cd82fd, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731026269483 2024-11-08T00:37:51,569 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting bddb79b8d03245358c7712da459a28c6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1731026269523 2024-11-08T00:37:51,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/15be68b041414456bf155a4a7f8fc70f is 1080, key is row0035/info:/1731026271544/Put/seqid=0 2024-11-08T00:37:51,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741840_1016 (size=16817) 2024-11-08T00:37:51,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741840_1016 (size=16817) 2024-11-08T00:37:51,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/15be68b041414456bf155a4a7f8fc70f 2024-11-08T00:37:51,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/15be68b041414456bf155a4a7f8fc70f as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/15be68b041414456bf155a4a7f8fc70f 2024-11-08T00:37:51,585 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e8733a307b5de0ca2decb5cd91d5420#info#compaction#59 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 
0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:37:51,586 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/5b41390638cf44c29248a5e6be8ecf23 is 1080, key is row0001/info:/1731026269468/Put/seqid=0 2024-11-08T00:37:51,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/15be68b041414456bf155a4a7f8fc70f, entries=11, sequenceid=58, filesize=16.4 K 2024-11-08T00:37:51,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 5e8733a307b5de0ca2decb5cd91d5420 in 22ms, sequenceid=58, compaction requested=false 2024-11-08T00:37:51,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:51,590 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.8 K, sizeToCheck=16.0 K 2024-11-08T00:37:51,590 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:51,590 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd because midkey is the same as first or last row 2024-11-08T00:37:51,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:51,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-08T00:37:51,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741841_1017 (size=41747) 2024-11-08T00:37:51,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741841_1017 (size=41747) 2024-11-08T00:37:51,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/2d32ea4aff1940a699f632aa83460add is 1080, key is row0046/info:/1731026271568/Put/seqid=0 2024-11-08T00:37:51,598 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/5b41390638cf44c29248a5e6be8ecf23 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23 2024-11-08T00:37:51,603 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741842_1018 (size=16817) 2024-11-08T00:37:51,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741842_1018 (size=16817) 2024-11-08T00:37:51,607 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e8733a307b5de0ca2decb5cd91d5420/info of 5e8733a307b5de0ca2decb5cd91d5420 into 5b41390638cf44c29248a5e6be8ecf23(size=40.8 K), total size for store is 57.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:51,607 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., storeName=5e8733a307b5de0ca2decb5cd91d5420/info, priority=13, startTime=1731026271567; duration=0sec 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23 because midkey is the same as first or last row 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23 because midkey is the same as first or last row 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23 because midkey is the same as first or last row 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:51,607 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e8733a307b5de0ca2decb5cd91d5420:info 2024-11-08T00:37:51,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:51,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:52,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/2d32ea4aff1940a699f632aa83460add 2024-11-08T00:37:52,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/2d32ea4aff1940a699f632aa83460add as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/2d32ea4aff1940a699f632aa83460add 2024-11-08T00:37:52,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/2d32ea4aff1940a699f632aa83460add, entries=11, sequenceid=72, filesize=16.4 K 2024-11-08T00:37:52,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for 5e8733a307b5de0ca2decb5cd91d5420 in 427ms, sequenceid=72, compaction requested=true 2024-11-08T00:37:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-08T00:37:52,017 DEBUG 
[MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23 because midkey is the same as first or last row 2024-11-08T00:37:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e8733a307b5de0ca2decb5cd91d5420:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:37:52,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:52,018 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:37:52,019 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:37:52,019 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 5e8733a307b5de0ca2decb5cd91d5420/info is initiating minor compaction (all files) 2024-11-08T00:37:52,019 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e8733a307b5de0ca2decb5cd91d5420/info in TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:52,019 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/15be68b041414456bf155a4a7f8fc70f, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/2d32ea4aff1940a699f632aa83460add] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp, totalSize=73.6 K 2024-11-08T00:37:52,019 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5b41390638cf44c29248a5e6be8ecf23, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1731026269468 2024-11-08T00:37:52,020 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 15be68b041414456bf155a4a7f8fc70f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1731026271544 2024-11-08T00:37:52,020 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2d32ea4aff1940a699f632aa83460add, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=72, earliestPutTs=1731026271568 2024-11-08T00:37:52,030 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 
{}] throttle.PressureAwareThroughputController(145): 5e8733a307b5de0ca2decb5cd91d5420#info#compaction#61 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:37:52,031 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/0ab858d8ef0a4e7c927eca3350461716 is 1080, key is row0001/info:/1731026269468/Put/seqid=0 2024-11-08T00:37:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741843_1019 (size=65612) 2024-11-08T00:37:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741843_1019 (size=65612) 2024-11-08T00:37:52,040 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/0ab858d8ef0a4e7c927eca3350461716 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 2024-11-08T00:37:52,046 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e8733a307b5de0ca2decb5cd91d5420/info of 5e8733a307b5de0ca2decb5cd91d5420 into 0ab858d8ef0a4e7c927eca3350461716(size=64.1 K), total size for store is 64.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
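The pair of messages that recurs after every flush and compaction in this section -- "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" immediately followed by "cannot split ... because midkey is the same as first or last row" -- reflects two independent checks: a size threshold that passes, and a midkey test that vetoes the split, the usual reason being that a split point equal to the file's first or last row would leave one daughter region empty. A rough sketch of the two checks under those assumptions (helper names and row values below are illustrative, not HBase API):

import java.util.Arrays;

public class SplitDecisionSketch {
  // Size check: total store size must exceed the split threshold
  // (logged as sizeToCheck=16.0 K, taken here as 16 * 1024 bytes).
  static boolean sizeBigEnough(long sumSizeBytes, long sizeToCheckBytes) {
    return sumSizeBytes > sizeToCheckBytes;
  }

  // Midkey check: the candidate split point must differ from both the first and
  // the last row of the store file, otherwise one daughter region would be empty.
  static boolean midkeyUsable(byte[] firstRow, byte[] midKey, byte[] lastRow) {
    return !Arrays.equals(midKey, firstRow) && !Arrays.equals(midKey, lastRow);
  }

  public static void main(String[] args) {
    // 65612 bytes is the size of the file the compaction above just produced
    // (~64.1 K); the row keys below are illustrative values only.
    System.out.println(sizeBigEnough(65_612L, 16 * 1024L));   // true  -> "Should split because region size is big enough"
    System.out.println(midkeyUsable("row0001".getBytes(),
        "row0001".getBytes(), "row0046".getBytes()));         // false -> "cannot split ... midkey is the same as first or last row"
  }
}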
2024-11-08T00:37:52,046 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:52,046 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., storeName=5e8733a307b5de0ca2decb5cd91d5420/info, priority=13, startTime=1731026272017; duration=0sec 2024-11-08T00:37:52,046 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.1 K, sizeToCheck=16.0 K 2024-11-08T00:37:52,046 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:52,046 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 because midkey is the same as first or last row 2024-11-08T00:37:52,046 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.1 K, sizeToCheck=16.0 K 2024-11-08T00:37:52,046 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:52,047 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 because midkey is the same as first or last row 2024-11-08T00:37:52,047 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.1 K, sizeToCheck=16.0 K 2024-11-08T00:37:52,047 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:52,047 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 because midkey is the same as first or last row 2024-11-08T00:37:52,047 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:52,047 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e8733a307b5de0ca2decb5cd91d5420:info 2024-11-08T00:37:52,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:52,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:53,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-08T00:37:53,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/649d5b01f1fe468e85d0bb635df5ef2d is 1080, key is row0057/info:/1731026271591/Put/seqid=0 2024-11-08T00:37:53,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741844_1020 (size=14663) 2024-11-08T00:37:53,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741844_1020 (size=14663) 2024-11-08T00:37:53,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/649d5b01f1fe468e85d0bb635df5ef2d 2024-11-08T00:37:53,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/649d5b01f1fe468e85d0bb635df5ef2d as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/649d5b01f1fe468e85d0bb635df5ef2d 2024-11-08T00:37:53,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/649d5b01f1fe468e85d0bb635df5ef2d, entries=9, sequenceid=86, filesize=14.3 K 2024-11-08T00:37:53,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=10.51 KB/10760 for 5e8733a307b5de0ca2decb5cd91d5420 in 26ms, sequenceid=86, compaction requested=false 2024-11-08T00:37:53,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:53,639 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-08T00:37:53,639 DEBUG [MemStoreFlusher.0 {}] 
regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:53,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,639 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 because midkey is the same as first or last row 2024-11-08T00:37:53,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-08T00:37:53,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/5da07a8ee0174bff9cbb923f8d19e010 is 1080, key is row0066/info:/1731026273616/Put/seqid=0 2024-11-08T00:37:53,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741845_1021 (size=16817) 2024-11-08T00:37:53,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/5da07a8ee0174bff9cbb923f8d19e010 2024-11-08T00:37:53,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741845_1021 (size=16817) 2024-11-08T00:37:53,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/5da07a8ee0174bff9cbb923f8d19e010 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5da07a8ee0174bff9cbb923f8d19e010 2024-11-08T00:37:53,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5da07a8ee0174bff9cbb923f8d19e010, entries=11, sequenceid=100, filesize=16.4 K 2024-11-08T00:37:53,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for 5e8733a307b5de0ca2decb5cd91d5420 in 23ms, sequenceid=100, compaction requested=true 2024-11-08T00:37:53,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:53,663 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.8 K, sizeToCheck=16.0 K 2024-11-08T00:37:53,663 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:53,663 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 because midkey is the same as first or last row 2024-11-08T00:37:53,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5e8733a307b5de0ca2decb5cd91d5420:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:37:53,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:53,663 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:37:53,664 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 97092 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:37:53,664 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 5e8733a307b5de0ca2decb5cd91d5420/info is initiating minor compaction (all files) 2024-11-08T00:37:53,664 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e8733a307b5de0ca2decb5cd91d5420/info in TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,665 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/649d5b01f1fe468e85d0bb635df5ef2d, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5da07a8ee0174bff9cbb923f8d19e010] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp, totalSize=94.8 K 2024-11-08T00:37:53,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-08T00:37:53,665 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0ab858d8ef0a4e7c927eca3350461716, keycount=56, bloomtype=ROW, size=64.1 K, encoding=NONE, compression=NONE, seqNum=72, earliestPutTs=1731026269468 2024-11-08T00:37:53,665 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 649d5b01f1fe468e85d0bb635df5ef2d, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731026271591 2024-11-08T00:37:53,666 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5da07a8ee0174bff9cbb923f8d19e010, keycount=11, bloomtype=ROW, size=16.4 
K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731026273616 2024-11-08T00:37:53,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/7a53a813528c40669e5144480a39fd8a is 1080, key is row0077/info:/1731026273641/Put/seqid=0 2024-11-08T00:37:53,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741846_1022 (size=17894) 2024-11-08T00:37:53,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741846_1022 (size=17894) 2024-11-08T00:37:53,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/7a53a813528c40669e5144480a39fd8a 2024-11-08T00:37:53,679 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e8733a307b5de0ca2decb5cd91d5420#info#compaction#65 average throughput is 26.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:37:53,680 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/3d891a0fe3384a94ac90209c3eba3c60 is 1080, key is row0001/info:/1731026269468/Put/seqid=0 2024-11-08T00:37:53,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/7a53a813528c40669e5144480a39fd8a as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/7a53a813528c40669e5144480a39fd8a 2024-11-08T00:37:53,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741847_1023 (size=87327) 2024-11-08T00:37:53,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741847_1023 (size=87327) 2024-11-08T00:37:53,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/7a53a813528c40669e5144480a39fd8a, entries=12, sequenceid=115, filesize=17.5 K 2024-11-08T00:37:53,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for 5e8733a307b5de0ca2decb5cd91d5420 in 25ms, sequenceid=115, compaction requested=false 2024-11-08T00:37:53,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:53,690 DEBUG [MemStoreFlusher.0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.3 K, sizeToCheck=16.0 K 2024-11-08T00:37:53,690 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:53,691 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 because midkey is the same as first or last row 2024-11-08T00:37:53,694 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/3d891a0fe3384a94ac90209c3eba3c60 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60 2024-11-08T00:37:53,700 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5e8733a307b5de0ca2decb5cd91d5420/info of 5e8733a307b5de0ca2decb5cd91d5420 into 3d891a0fe3384a94ac90209c3eba3c60(size=85.3 K), total size for store is 102.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-08T00:37:53,700 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e8733a307b5de0ca2decb5cd91d5420: 2024-11-08T00:37:53,700 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., storeName=5e8733a307b5de0ca2decb5cd91d5420/info, priority=13, startTime=1731026273663; duration=0sec 2024-11-08T00:37:53,700 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-08T00:37:53,700 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:53,700 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-08T00:37:53,700 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:53,700 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-08T00:37:53,700 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-08T00:37:53,701 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:53,701 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:53,701 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e8733a307b5de0ca2decb5cd91d5420:info 2024-11-08T00:37:53,702 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46443 {}] assignment.AssignmentManager(1355): Split request from 3302f0f507bd,37459,1731026258169, parent={ENCODED => 5e8733a307b5de0ca2decb5cd91d5420, NAME => 'TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-08T00:37:53,707 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46443 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:53,711 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46443 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=5e8733a307b5de0ca2decb5cd91d5420, daughterA=21fe38dceb9bbbd8caa13dbb69e57a46, daughterB=67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:53,712 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=5e8733a307b5de0ca2decb5cd91d5420, daughterA=21fe38dceb9bbbd8caa13dbb69e57a46, daughterB=67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:53,712 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=5e8733a307b5de0ca2decb5cd91d5420, daughterA=21fe38dceb9bbbd8caa13dbb69e57a46, daughterB=67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:53,712 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=5e8733a307b5de0ca2decb5cd91d5420, daughterA=21fe38dceb9bbbd8caa13dbb69e57a46, daughterB=67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:53,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, UNASSIGN}] 2024-11-08T00:37:53,721 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, UNASSIGN 2024-11-08T00:37:53,723 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=5e8733a307b5de0ca2decb5cd91d5420, regionState=CLOSING, regionLocation=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:53,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, UNASSIGN because future has completed 2024-11-08T00:37:53,726 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-08T00:37:53,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5e8733a307b5de0ca2decb5cd91d5420, server=3302f0f507bd,37459,1731026258169}] 2024-11-08T00:37:53,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:53,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:53,884 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,884 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-08T00:37:53,885 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 5e8733a307b5de0ca2decb5cd91d5420, disabling compactions & flushes 2024-11-08T00:37:53,885 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:53,885 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:53,885 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. after waiting 0 ms 2024-11-08T00:37:53,885 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 
2024-11-08T00:37:53,885 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 5e8733a307b5de0ca2decb5cd91d5420 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-08T00:37:53,891 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/e45604636afb46ed9fcfe82f0d6c579d is 1080, key is row0089/info:/1731026273666/Put/seqid=0 2024-11-08T00:37:53,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741848_1024 (size=13586) 2024-11-08T00:37:53,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741848_1024 (size=13586) 2024-11-08T00:37:53,898 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/e45604636afb46ed9fcfe82f0d6c579d 2024-11-08T00:37:53,905 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/.tmp/info/e45604636afb46ed9fcfe82f0d6c579d as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/e45604636afb46ed9fcfe82f0d6c579d 2024-11-08T00:37:53,911 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/e45604636afb46ed9fcfe82f0d6c579d, entries=8, sequenceid=127, filesize=13.3 K 2024-11-08T00:37:53,913 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 5e8733a307b5de0ca2decb5cd91d5420 in 28ms, sequenceid=127, compaction requested=true 2024-11-08T00:37:53,915 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/91783a98729e48899349376653ebd3c3, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23, 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/bddb79b8d03245358c7712da459a28c6, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/15be68b041414456bf155a4a7f8fc70f, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/2d32ea4aff1940a699f632aa83460add, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/649d5b01f1fe468e85d0bb635df5ef2d, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5da07a8ee0174bff9cbb923f8d19e010] to archive 2024-11-08T00:37:53,916 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-08T00:37:53,919 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/91783a98729e48899349376653ebd3c3 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/91783a98729e48899349376653ebd3c3 2024-11-08T00:37:53,920 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/67885496c162474f8d33bbb449cd82fd 2024-11-08T00:37:53,921 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5b41390638cf44c29248a5e6be8ecf23 2024-11-08T00:37:53,923 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/bddb79b8d03245358c7712da459a28c6 to 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/bddb79b8d03245358c7712da459a28c6 2024-11-08T00:37:53,924 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/15be68b041414456bf155a4a7f8fc70f to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/15be68b041414456bf155a4a7f8fc70f 2024-11-08T00:37:53,925 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/0ab858d8ef0a4e7c927eca3350461716 2024-11-08T00:37:53,926 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/2d32ea4aff1940a699f632aa83460add to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/2d32ea4aff1940a699f632aa83460add 2024-11-08T00:37:53,927 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/649d5b01f1fe468e85d0bb635df5ef2d to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/649d5b01f1fe468e85d0bb635df5ef2d 2024-11-08T00:37:53,928 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5da07a8ee0174bff9cbb923f8d19e010 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/5da07a8ee0174bff9cbb923f8d19e010 2024-11-08T00:37:53,934 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
2024-11-08T00:37:53,935 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 2024-11-08T00:37:53,935 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 5e8733a307b5de0ca2decb5cd91d5420: Waiting for close lock at 1731026273885Running coprocessor pre-close hooks at 1731026273885Disabling compacts and flushes for region at 1731026273885Disabling writes for close at 1731026273885Obtaining lock to block concurrent updates at 1731026273885Preparing flush snapshotting stores in 5e8733a307b5de0ca2decb5cd91d5420 at 1731026273885Finished memstore snapshotting TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., syncing WAL and waiting on mvcc, flushsize=dataSize=8608, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1731026273886 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. at 1731026273886Flushing 5e8733a307b5de0ca2decb5cd91d5420/info: creating writer at 1731026273887 (+1 ms)Flushing 5e8733a307b5de0ca2decb5cd91d5420/info: appending metadata at 1731026273891 (+4 ms)Flushing 5e8733a307b5de0ca2decb5cd91d5420/info: closing flushed file at 1731026273891Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b2f893a: reopening flushed file at 1731026273904 (+13 ms)Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 5e8733a307b5de0ca2decb5cd91d5420 in 28ms, sequenceid=127, compaction requested=true at 1731026273913 (+9 ms)Writing region close event to WAL at 1731026273930 (+17 ms)Running coprocessor post-close hooks at 1731026273935 (+5 ms)Closed at 1731026273935 2024-11-08T00:37:53,937 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,937 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=5e8733a307b5de0ca2decb5cd91d5420, regionState=CLOSED 2024-11-08T00:37:53,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5e8733a307b5de0ca2decb5cd91d5420, server=3302f0f507bd,37459,1731026258169 because future has completed 2024-11-08T00:37:53,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-08T00:37:53,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 5e8733a307b5de0ca2decb5cd91d5420, server=3302f0f507bd,37459,1731026258169 in 214 msec 2024-11-08T00:37:53,944 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-08T00:37:53,944 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e8733a307b5de0ca2decb5cd91d5420, UNASSIGN in 222 msec 2024-11-08T00:37:53,952 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:53,955 INFO [PEWorker-4 
{}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=5e8733a307b5de0ca2decb5cd91d5420, threads=3 2024-11-08T00:37:53,956 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/7a53a813528c40669e5144480a39fd8a for region: 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,956 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60 for region: 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,956 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/e45604636afb46ed9fcfe82f0d6c579d for region: 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,965 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/e45604636afb46ed9fcfe82f0d6c579d, top=true 2024-11-08T00:37:53,965 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/7a53a813528c40669e5144480a39fd8a, top=true 2024-11-08T00:37:53,971 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-7a53a813528c40669e5144480a39fd8a for child: 67bc71518d7e759aa09a87781b7c9b7d, parent: 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,971 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/7a53a813528c40669e5144480a39fd8a for region: 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,976 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-e45604636afb46ed9fcfe82f0d6c579d for child: 67bc71518d7e759aa09a87781b7c9b7d, parent: 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,976 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/e45604636afb46ed9fcfe82f0d6c579d for region: 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741849_1025 (size=27) 2024-11-08T00:37:53,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741849_1025 (size=27) 2024-11-08T00:37:53,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741850_1026 (size=27) 2024-11-08T00:37:53,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741850_1026 (size=27) 2024-11-08T00:37:53,986 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60 for region: 5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:37:53,988 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 5e8733a307b5de0ca2decb5cd91d5420 Daughter A: [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420] storefiles, Daughter B: [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-7a53a813528c40669e5144480a39fd8a, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-e45604636afb46ed9fcfe82f0d6c579d] storefiles. 
2024-11-08T00:37:53,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741851_1027 (size=71) 2024-11-08T00:37:53,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741851_1027 (size=71) 2024-11-08T00:37:53,997 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:54,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741852_1028 (size=71) 2024-11-08T00:37:54,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741852_1028 (size=71) 2024-11-08T00:37:54,009 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:54,017 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-08T00:37:54,019 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-08T00:37:54,022 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731026274021"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731026274021"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731026274021"}]},"ts":"1731026274021"} 2024-11-08T00:37:54,022 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731026274021"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026274021"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731026274021"}]},"ts":"1731026274021"} 2024-11-08T00:37:54,022 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731026274021"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731026274021"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731026274021"}]},"ts":"1731026274021"} 2024-11-08T00:37:54,037 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=21fe38dceb9bbbd8caa13dbb69e57a46, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=67bc71518d7e759aa09a87781b7c9b7d, ASSIGN}] 2024-11-08T00:37:54,038 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=21fe38dceb9bbbd8caa13dbb69e57a46, ASSIGN 2024-11-08T00:37:54,038 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=67bc71518d7e759aa09a87781b7c9b7d, ASSIGN 2024-11-08T00:37:54,039 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=21fe38dceb9bbbd8caa13dbb69e57a46, ASSIGN; state=SPLITTING_NEW, location=3302f0f507bd,37459,1731026258169; forceNewPlan=false, retain=false 2024-11-08T00:37:54,039 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=67bc71518d7e759aa09a87781b7c9b7d, ASSIGN; state=SPLITTING_NEW, location=3302f0f507bd,37459,1731026258169; forceNewPlan=false, retain=false 2024-11-08T00:37:54,190 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=67bc71518d7e759aa09a87781b7c9b7d, regionState=OPENING, regionLocation=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:54,190 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=21fe38dceb9bbbd8caa13dbb69e57a46, regionState=OPENING, regionLocation=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:54,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=21fe38dceb9bbbd8caa13dbb69e57a46, ASSIGN because future has completed 2024-11-08T00:37:54,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 21fe38dceb9bbbd8caa13dbb69e57a46, server=3302f0f507bd,37459,1731026258169}] 2024-11-08T00:37:54,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=67bc71518d7e759aa09a87781b7c9b7d, ASSIGN because future has completed 2024-11-08T00:37:54,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 67bc71518d7e759aa09a87781b7c9b7d, server=3302f0f507bd,37459,1731026258169}] 2024-11-08T00:37:54,350 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 
2024-11-08T00:37:54,351 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 21fe38dceb9bbbd8caa13dbb69e57a46, NAME => 'TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-08T00:37:54,351 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,351 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:37:54,351 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,352 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,354 INFO [StoreOpener-21fe38dceb9bbbd8caa13dbb69e57a46-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,355 INFO [StoreOpener-21fe38dceb9bbbd8caa13dbb69e57a46-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 21fe38dceb9bbbd8caa13dbb69e57a46 columnFamilyName info 2024-11-08T00:37:54,356 DEBUG [StoreOpener-21fe38dceb9bbbd8caa13dbb69e57a46-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:54,372 DEBUG [StoreOpener-21fe38dceb9bbbd8caa13dbb69e57a46-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420->hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60-bottom 2024-11-08T00:37:54,373 INFO [StoreOpener-21fe38dceb9bbbd8caa13dbb69e57a46-1 {}] regionserver.HStore(327): Store=21fe38dceb9bbbd8caa13dbb69e57a46/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:37:54,373 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,374 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,375 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,375 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,375 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,377 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,378 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 21fe38dceb9bbbd8caa13dbb69e57a46; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742242, jitterRate=-0.05619168281555176}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:37:54,378 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:37:54,378 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 21fe38dceb9bbbd8caa13dbb69e57a46: Running coprocessor pre-open hook at 1731026274352Writing region info on filesystem at 1731026274352Initializing all the Stores at 1731026274353 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026274353Cleaning up temporary data from old regions at 1731026274375 (+22 ms)Running coprocessor post-open hooks at 1731026274378 (+3 ms)Region opened successfully at 1731026274378 2024-11-08T00:37:54,379 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46., pid=12, masterSystemTime=1731026274345 2024-11-08T00:37:54,379 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
21fe38dceb9bbbd8caa13dbb69e57a46:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:37:54,379 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:54,379 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-08T00:37:54,380 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:37:54,380 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 21fe38dceb9bbbd8caa13dbb69e57a46/info is initiating minor compaction (all files) 2024-11-08T00:37:54,380 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 21fe38dceb9bbbd8caa13dbb69e57a46/info in TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:37:54,380 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420->hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60-bottom] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/.tmp, totalSize=85.3 K 2024-11-08T00:37:54,381 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731026269468 2024-11-08T00:37:54,381 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:37:54,381 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:37:54,382 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 
2024-11-08T00:37:54,382 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 67bc71518d7e759aa09a87781b7c9b7d, NAME => 'TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-08T00:37:54,382 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,382 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:37:54,382 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,382 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,382 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=21fe38dceb9bbbd8caa13dbb69e57a46, regionState=OPEN, openSeqNum=131, regionLocation=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:54,383 INFO [StoreOpener-67bc71518d7e759aa09a87781b7c9b7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,384 INFO [StoreOpener-67bc71518d7e759aa09a87781b7c9b7d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 67bc71518d7e759aa09a87781b7c9b7d columnFamilyName info 2024-11-08T00:37:54,384 DEBUG [StoreOpener-67bc71518d7e759aa09a87781b7c9b7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:37:54,384 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-08T00:37:54,384 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-08T00:37:54,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-08T00:37:54,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 21fe38dceb9bbbd8caa13dbb69e57a46, server=3302f0f507bd,37459,1731026258169 because future has completed 2024-11-08T00:37:54,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-08T00:37:54,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 21fe38dceb9bbbd8caa13dbb69e57a46, server=3302f0f507bd,37459,1731026258169 in 193 msec 2024-11-08T00:37:54,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=21fe38dceb9bbbd8caa13dbb69e57a46, ASSIGN in 351 msec 2024-11-08T00:37:54,399 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 21fe38dceb9bbbd8caa13dbb69e57a46#info#compaction#67 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:37:54,400 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/.tmp/info/a12265afb17049ca9fde2d5081f45add is 1080, key is row0001/info:/1731026269468/Put/seqid=0 2024-11-08T00:37:54,401 DEBUG [StoreOpener-67bc71518d7e759aa09a87781b7c9b7d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420->hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60-top 2024-11-08T00:37:54,405 DEBUG [StoreOpener-67bc71518d7e759aa09a87781b7c9b7d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-7a53a813528c40669e5144480a39fd8a 2024-11-08T00:37:54,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741853_1029 (size=70862) 2024-11-08T00:37:54,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741853_1029 (size=70862) 2024-11-08T00:37:54,410 DEBUG [StoreOpener-67bc71518d7e759aa09a87781b7c9b7d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-e45604636afb46ed9fcfe82f0d6c579d 2024-11-08T00:37:54,410 INFO 
[StoreOpener-67bc71518d7e759aa09a87781b7c9b7d-1 {}] regionserver.HStore(327): Store=67bc71518d7e759aa09a87781b7c9b7d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:37:54,410 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,411 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,412 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,412 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,412 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/info/0c86cd7e7479455faae824a41f97493f is 193, key is TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d./info:regioninfo/1731026274190/Put/seqid=0 2024-11-08T00:37:54,413 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/.tmp/info/a12265afb17049ca9fde2d5081f45add as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/info/a12265afb17049ca9fde2d5081f45add 2024-11-08T00:37:54,414 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,415 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 67bc71518d7e759aa09a87781b7c9b7d; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870309, jitterRate=0.10665558278560638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T00:37:54,415 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:37:54,415 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 67bc71518d7e759aa09a87781b7c9b7d: Running coprocessor pre-open hook at 
1731026274382Writing region info on filesystem at 1731026274382Initializing all the Stores at 1731026274383 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026274383Cleaning up temporary data from old regions at 1731026274412 (+29 ms)Running coprocessor post-open hooks at 1731026274415 (+3 ms)Region opened successfully at 1731026274415 2024-11-08T00:37:54,416 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., pid=13, masterSystemTime=1731026274345 2024-11-08T00:37:54,416 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 67bc71518d7e759aa09a87781b7c9b7d:info, priority=-2147483648, current under compaction store size is 2 2024-11-08T00:37:54,416 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:54,416 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:37:54,417 INFO [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:37:54,417 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.HStore(1541): 67bc71518d7e759aa09a87781b7c9b7d/info is initiating minor compaction (all files) 2024-11-08T00:37:54,417 INFO [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 67bc71518d7e759aa09a87781b7c9b7d/info in TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 
2024-11-08T00:37:54,417 INFO [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420->hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60-top, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-7a53a813528c40669e5144480a39fd8a, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-e45604636afb46ed9fcfe82f0d6c579d] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp, totalSize=116.0 K 2024-11-08T00:37:54,418 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] compactions.Compactor(225): Compacting 3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731026269468 2024-11-08T00:37:54,418 DEBUG [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:37:54,418 INFO [RS_OPEN_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:37:54,418 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-7a53a813528c40669e5144480a39fd8a, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731026273641 2024-11-08T00:37:54,419 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-e45604636afb46ed9fcfe82f0d6c579d, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731026273666 2024-11-08T00:37:54,419 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=67bc71518d7e759aa09a87781b7c9b7d, regionState=OPEN, openSeqNum=131, regionLocation=3302f0f507bd,37459,1731026258169 2024-11-08T00:37:54,419 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 21fe38dceb9bbbd8caa13dbb69e57a46/info of 21fe38dceb9bbbd8caa13dbb69e57a46 into a12265afb17049ca9fde2d5081f45add(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-08T00:37:54,420 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 21fe38dceb9bbbd8caa13dbb69e57a46: 2024-11-08T00:37:54,420 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46., storeName=21fe38dceb9bbbd8caa13dbb69e57a46/info, priority=15, startTime=1731026274379; duration=0sec 2024-11-08T00:37:54,420 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:54,420 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 21fe38dceb9bbbd8caa13dbb69e57a46:info 2024-11-08T00:37:54,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 67bc71518d7e759aa09a87781b7c9b7d, server=3302f0f507bd,37459,1731026258169 because future has completed 2024-11-08T00:37:54,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741854_1030 (size=9847) 2024-11-08T00:37:54,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741854_1030 (size=9847) 2024-11-08T00:37:54,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/info/0c86cd7e7479455faae824a41f97493f 2024-11-08T00:37:54,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-08T00:37:54,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 67bc71518d7e759aa09a87781b7c9b7d, server=3302f0f507bd,37459,1731026258169 in 228 msec 2024-11-08T00:37:54,428 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-08T00:37:54,428 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=67bc71518d7e759aa09a87781b7c9b7d, ASSIGN in 388 msec 2024-11-08T00:37:54,430 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=5e8733a307b5de0ca2decb5cd91d5420, daughterA=21fe38dceb9bbbd8caa13dbb69e57a46, daughterB=67bc71518d7e759aa09a87781b7c9b7d in 721 msec 2024-11-08T00:37:54,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/ns/47fe697389b040599e6bd7ac7a2ff31f is 43, key is default/ns:d/1731026259292/Put/seqid=0 2024-11-08T00:37:54,445 INFO [RS:0;3302f0f507bd:37459-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 67bc71518d7e759aa09a87781b7c9b7d#info#compaction#70 average throughput is 17.96 MB/second, slept 0 time(s) 
and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:37:54,445 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/ec7860c3f7a34c169486472cc402a10d is 1080, key is row0062/info:/1731026271602/Put/seqid=0 2024-11-08T00:37:54,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741855_1031 (size=5153) 2024-11-08T00:37:54,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741855_1031 (size=5153) 2024-11-08T00:37:54,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/ns/47fe697389b040599e6bd7ac7a2ff31f 2024-11-08T00:37:54,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741856_1032 (size=42984) 2024-11-08T00:37:54,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741856_1032 (size=42984) 2024-11-08T00:37:54,460 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/ec7860c3f7a34c169486472cc402a10d as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/ec7860c3f7a34c169486472cc402a10d 2024-11-08T00:37:54,465 INFO [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 67bc71518d7e759aa09a87781b7c9b7d/info of 67bc71518d7e759aa09a87781b7c9b7d into ec7860c3f7a34c169486472cc402a10d(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-08T00:37:54,466 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:37:54,466 INFO [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., storeName=67bc71518d7e759aa09a87781b7c9b7d/info, priority=13, startTime=1731026274416; duration=0sec 2024-11-08T00:37:54,466 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:37:54,466 DEBUG [RS:0;3302f0f507bd:37459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 67bc71518d7e759aa09a87781b7c9b7d:info 2024-11-08T00:37:54,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/table/2c1bce0ff505462299f02e3c15f6e0f6 is 65, key is TestLogRolling-testLogRolling/table:state/1731026259796/Put/seqid=0 2024-11-08T00:37:54,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741857_1033 (size=5340) 2024-11-08T00:37:54,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741857_1033 (size=5340) 2024-11-08T00:37:54,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/table/2c1bce0ff505462299f02e3c15f6e0f6 2024-11-08T00:37:54,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/info/0c86cd7e7479455faae824a41f97493f as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/info/0c86cd7e7479455faae824a41f97493f 2024-11-08T00:37:54,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/info/0c86cd7e7479455faae824a41f97493f, entries=30, sequenceid=17, filesize=9.6 K 2024-11-08T00:37:54,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/ns/47fe697389b040599e6bd7ac7a2ff31f as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/ns/47fe697389b040599e6bd7ac7a2ff31f 2024-11-08T00:37:54,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/ns/47fe697389b040599e6bd7ac7a2ff31f, entries=2, sequenceid=17, filesize=5.0 K 2024-11-08T00:37:54,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/table/2c1bce0ff505462299f02e3c15f6e0f6 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/table/2c1bce0ff505462299f02e3c15f6e0f6 2024-11-08T00:37:54,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/table/2c1bce0ff505462299f02e3c15f6e0f6, entries=2, sequenceid=17, filesize=5.2 K 2024-11-08T00:37:54,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 112ms, sequenceid=17, compaction requested=false 2024-11-08T00:37:54,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-08T00:37:54,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:54,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:55,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:58702 deadline: 1731026285687, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. is not online on 3302f0f507bd,37459,1731026258169 2024-11-08T00:37:55,699 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., hostname=3302f0f507bd,37459,1731026258169, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., hostname=3302f0f507bd,37459,1731026258169, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. 
is not online on 3302f0f507bd,37459,1731026258169 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:37:55,700 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., hostname=3302f0f507bd,37459,1731026258169, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420. is not online on 3302f0f507bd,37459,1731026258169 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-08T00:37:55,700 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731026259417.5e8733a307b5de0ca2decb5cd91d5420., hostname=3302f0f507bd,37459,1731026258169, seqNum=2 from cache 2024-11-08T00:37:55,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:55,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:56,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:56,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:57,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:37:57,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:58,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:58,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:58,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:58,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,477 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T00:37:59,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:37:59,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:37:59,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:00,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:00,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:01,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:01,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:02,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:02,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:03,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:03,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:04,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:04,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:05,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:05,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:05,815 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., hostname=3302f0f507bd,37459,1731026258169, seqNum=131] 2024-11-08T00:38:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:05,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:38:05,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/3d36b088c61047498c14861d4efae21a is 1080, key is row0097/info:/1731026285816/Put/seqid=0 2024-11-08T00:38:05,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741858_1034 (size=12516) 2024-11-08T00:38:05,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741858_1034 (size=12516) 2024-11-08T00:38:05,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/3d36b088c61047498c14861d4efae21a 2024-11-08T00:38:05,843 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/3d36b088c61047498c14861d4efae21a as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d36b088c61047498c14861d4efae21a 2024-11-08T00:38:05,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d36b088c61047498c14861d4efae21a, entries=7, sequenceid=141, filesize=12.2 K 2024-11-08T00:38:05,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 67bc71518d7e759aa09a87781b7c9b7d in 23ms, sequenceid=141, compaction requested=false 2024-11-08T00:38:05,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:05,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-08T00:38:05,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/c646d1b977b249ada09be6ff745e0b3f is 1080, key is row0104/info:/1731026285828/Put/seqid=0 2024-11-08T00:38:05,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741859_1035 (size=17906) 2024-11-08T00:38:05,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741859_1035 (size=17906) 2024-11-08T00:38:05,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/c646d1b977b249ada09be6ff745e0b3f 2024-11-08T00:38:05,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/c646d1b977b249ada09be6ff745e0b3f as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c646d1b977b249ada09be6ff745e0b3f 2024-11-08T00:38:05,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c646d1b977b249ada09be6ff745e0b3f, entries=12, sequenceid=156, filesize=17.5 K 2024-11-08T00:38:05,875 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 67bc71518d7e759aa09a87781b7c9b7d in 24ms, sequenceid=156, compaction requested=true 2024-11-08T00:38:05,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:05,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 67bc71518d7e759aa09a87781b7c9b7d:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:38:05,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:05,875 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:38:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:05,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-08T00:38:05,877 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73406 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:38:05,878 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 67bc71518d7e759aa09a87781b7c9b7d/info is initiating minor compaction (all files) 2024-11-08T00:38:05,878 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 67bc71518d7e759aa09a87781b7c9b7d/info in TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 
2024-11-08T00:38:05,878 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/ec7860c3f7a34c169486472cc402a10d, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d36b088c61047498c14861d4efae21a, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c646d1b977b249ada09be6ff745e0b3f] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp, totalSize=71.7 K 2024-11-08T00:38:05,878 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting ec7860c3f7a34c169486472cc402a10d, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731026271602 2024-11-08T00:38:05,878 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3d36b088c61047498c14861d4efae21a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731026285816 2024-11-08T00:38:05,879 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting c646d1b977b249ada09be6ff745e0b3f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731026285828 2024-11-08T00:38:05,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/2088b56bbf524b679d2ef8da6103e683 is 1080, key is row0116/info:/1731026285852/Put/seqid=0 2024-11-08T00:38:05,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741860_1036 (size=17906) 2024-11-08T00:38:05,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741860_1036 (size=17906) 2024-11-08T00:38:05,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/2088b56bbf524b679d2ef8da6103e683 2024-11-08T00:38:05,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/2088b56bbf524b679d2ef8da6103e683 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/2088b56bbf524b679d2ef8da6103e683 2024-11-08T00:38:05,892 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 67bc71518d7e759aa09a87781b7c9b7d#info#compaction#75 average throughput is 55.41 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:38:05,893 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/bf4e8d1caa3d4974a673090d869514e3 is 1080, key is row0062/info:/1731026271602/Put/seqid=0 2024-11-08T00:38:05,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/2088b56bbf524b679d2ef8da6103e683, entries=12, sequenceid=171, filesize=17.5 K 2024-11-08T00:38:05,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 67bc71518d7e759aa09a87781b7c9b7d in 22ms, sequenceid=171, compaction requested=false 2024-11-08T00:38:05,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:05,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741861_1037 (size=63636) 2024-11-08T00:38:05,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741861_1037 (size=63636) 2024-11-08T00:38:05,910 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/bf4e8d1caa3d4974a673090d869514e3 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/bf4e8d1caa3d4974a673090d869514e3 2024-11-08T00:38:05,915 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 67bc71518d7e759aa09a87781b7c9b7d/info of 67bc71518d7e759aa09a87781b7c9b7d into bf4e8d1caa3d4974a673090d869514e3(size=62.1 K), total size for store is 79.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-08T00:38:05,915 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:05,915 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., storeName=67bc71518d7e759aa09a87781b7c9b7d/info, priority=13, startTime=1731026285875; duration=0sec 2024-11-08T00:38:05,915 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:05,915 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 67bc71518d7e759aa09a87781b7c9b7d:info 2024-11-08T00:38:06,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:06,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:07,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:07,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:07,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:07,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:38:07,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/5f9603a4633540188581d4c34a7ac7ca is 1080, key is row0128/info:/1731026285877/Put/seqid=0 2024-11-08T00:38:07,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741862_1038 (size=12516) 2024-11-08T00:38:07,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741862_1038 (size=12516) 2024-11-08T00:38:07,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/5f9603a4633540188581d4c34a7ac7ca 2024-11-08T00:38:07,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/5f9603a4633540188581d4c34a7ac7ca as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5f9603a4633540188581d4c34a7ac7ca 2024-11-08T00:38:07,917 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5f9603a4633540188581d4c34a7ac7ca, entries=7, sequenceid=182, filesize=12.2 K 2024-11-08T00:38:07,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 67bc71518d7e759aa09a87781b7c9b7d in 20ms, sequenceid=182, compaction requested=true 2024-11-08T00:38:07,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:07,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 67bc71518d7e759aa09a87781b7c9b7d:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:38:07,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:07,918 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:38:07,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:07,918 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-08T00:38:07,919 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94058 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:38:07,919 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 67bc71518d7e759aa09a87781b7c9b7d/info is initiating minor compaction (all files) 2024-11-08T00:38:07,919 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 67bc71518d7e759aa09a87781b7c9b7d/info in TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:38:07,919 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/bf4e8d1caa3d4974a673090d869514e3, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/2088b56bbf524b679d2ef8da6103e683, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5f9603a4633540188581d4c34a7ac7ca] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp, totalSize=91.9 K 2024-11-08T00:38:07,919 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf4e8d1caa3d4974a673090d869514e3, keycount=54, bloomtype=ROW, size=62.1 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731026271602 2024-11-08T00:38:07,920 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2088b56bbf524b679d2ef8da6103e683, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731026285852 2024-11-08T00:38:07,920 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f9603a4633540188581d4c34a7ac7ca, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1731026285877 2024-11-08T00:38:07,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/fa7ca6a2f74d46c780264b6871d9df95 is 1080, key is row0135/info:/1731026287898/Put/seqid=0 2024-11-08T00:38:07,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741863_1039 (size=16828) 2024-11-08T00:38:07,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741863_1039 (size=16828) 2024-11-08T00:38:07,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=196 (bloomFilter=true), 
to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/fa7ca6a2f74d46c780264b6871d9df95 2024-11-08T00:38:07,932 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 67bc71518d7e759aa09a87781b7c9b7d#info#compaction#78 average throughput is 74.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:38:07,932 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/08a563b83d754cfe9c4feb7c1457eb5f is 1080, key is row0062/info:/1731026271602/Put/seqid=0 2024-11-08T00:38:07,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/fa7ca6a2f74d46c780264b6871d9df95 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/fa7ca6a2f74d46c780264b6871d9df95 2024-11-08T00:38:07,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741864_1040 (size=84293) 2024-11-08T00:38:07,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741864_1040 (size=84293) 2024-11-08T00:38:07,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/fa7ca6a2f74d46c780264b6871d9df95, entries=11, sequenceid=196, filesize=16.4 K 2024-11-08T00:38:07,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for 67bc71518d7e759aa09a87781b7c9b7d in 22ms, sequenceid=196, compaction requested=false 2024-11-08T00:38:07,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:07,941 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/08a563b83d754cfe9c4feb7c1457eb5f as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/08a563b83d754cfe9c4feb7c1457eb5f 2024-11-08T00:38:07,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:07,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-08T00:38:07,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/a0853d9bcc19480ea523da46ef113733 is 1080, key is row0146/info:/1731026287920/Put/seqid=0 2024-11-08T00:38:07,947 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 67bc71518d7e759aa09a87781b7c9b7d/info of 67bc71518d7e759aa09a87781b7c9b7d into 08a563b83d754cfe9c4feb7c1457eb5f(size=82.3 K), total size for store is 98.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-08T00:38:07,947 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:07,947 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., storeName=67bc71518d7e759aa09a87781b7c9b7d/info, priority=13, startTime=1731026287917; duration=0sec 2024-11-08T00:38:07,947 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:07,947 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 67bc71518d7e759aa09a87781b7c9b7d:info 2024-11-08T00:38:07,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741865_1041 (size=15750) 2024-11-08T00:38:07,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741865_1041 (size=15750) 2024-11-08T00:38:07,965 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
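Annotation (not part of the captured log): the recurring "Failed invocation" WARNs from Close-WAL-Writer-0 in this section (and the ones that follow at 00:38:08 through 00:38:11) all have the same shape. RecoverLeaseFSUtils.isFileClosed invokes DistributedFileSystem.isFileClosed reflectively, the reflective call is reported as java.lang.reflect.InvocationTargetException, and the real failure is the wrapped cause, java.io.IOException: Filesystem closed, raised by DFSClient.checkOpen once that DFS client has been shut down. The sketch below is not the HBase source; it is a minimal illustration, with made-up names (IsFileClosedProbe, probe), of how such a reflective probe surfaces the underlying IOException.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: hypothetical names, not the RecoverLeaseFSUtils implementation.
public final class IsFileClosedProbe {

  /**
   * Calls fs.isFileClosed(path) reflectively. The method only exists on
   * DistributedFileSystem, and any failure inside the call arrives wrapped in
   * InvocationTargetException -- in the traces above, the wrapped cause is
   * java.io.IOException("Filesystem closed") from a closed DFS client.
   */
  public static boolean probe(FileSystem fs, Path path) throws IOException {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem implementation has no isFileClosed
    } catch (InvocationTargetException e) {
      Throwable cause = e.getCause();
      throw cause instanceof IOException ? (IOException) cause : new IOException(cause);
    } catch (IllegalAccessException e) {
      throw new IOException(e);
    }
  }

  private IsFileClosedProbe() {
  }
}
```

The probe is best-effort: the same WARN reappears roughly once per second for each of the two WAL paths, which suggests the retry loop inside recoverDFSFileLease logging the failure and trying again.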
2024-11-08T00:38:08,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/a0853d9bcc19480ea523da46ef113733 2024-11-08T00:38:08,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/a0853d9bcc19480ea523da46ef113733 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/a0853d9bcc19480ea523da46ef113733 2024-11-08T00:38:08,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/a0853d9bcc19480ea523da46ef113733, entries=10, sequenceid=209, filesize=15.4 K 2024-11-08T00:38:08,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=5.25 KB/5380 for 67bc71518d7e759aa09a87781b7c9b7d in 428ms, sequenceid=209, compaction requested=true 2024-11-08T00:38:08,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:08,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 67bc71518d7e759aa09a87781b7c9b7d:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:38:08,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:08,370 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:38:08,371 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 116871 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:38:08,371 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 67bc71518d7e759aa09a87781b7c9b7d/info is initiating minor compaction (all files) 2024-11-08T00:38:08,371 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 67bc71518d7e759aa09a87781b7c9b7d/info in TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 
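Annotation (not part of the captured log): the flush records above follow a fixed pattern. A flush is requested on region 67bc71518d7e759aa09a87781b7c9b7d, the memstore is written to a .tmp HFile, the file is committed into info/, and HRegion(3140) reports a "Finished flush of dataSize ..." summary that also says whether a compaction was requested. When sizing flushes from a capture like this one, a small offline helper is enough; the class below (FlushLineSummary, a name of my own, tied to the exact line format seen in this log, which may differ in other HBase versions) pulls the region, flushed bytes, duration, sequence id, and compaction flag out of each such line.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Reads a test log on stdin and summarizes the "Finished flush" records shown above. */
public final class FlushLineSummary {

  // Mirrors the log format: dataSize ~<human>/<bytes> ... for <region> in <ms>ms,
  // sequenceid=<id>, compaction requested=<bool>
  private static final Pattern FLUSH = Pattern.compile(
      "Finished flush of dataSize ~([\\d.]+ [KMG]B)/(\\d+), heapSize ~[\\d.]+ [KMG]B/\\d+, "
          + "currentSize=[\\d.]+ [KMG]B/\\d+ for (\\w+) in (\\d+)ms, "
          + "sequenceid=(\\d+), compaction requested=(true|false)");

  public static void main(String[] args) throws Exception {
    BufferedReader in = new BufferedReader(
        new InputStreamReader(System.in, StandardCharsets.UTF_8));
    long totalBytes = 0;
    int flushes = 0;
    String line;
    while ((line = in.readLine()) != null) {
      Matcher m = FLUSH.matcher(line);
      if (m.find()) {
        flushes++;
        totalBytes += Long.parseLong(m.group(2));
        System.out.printf("region=%s flushed=%s (%s bytes) in %sms seqid=%s compaction=%s%n",
            m.group(3), m.group(1), m.group(2), m.group(4), m.group(5), m.group(6));
      }
    }
    System.out.printf("%d flushes, %d bytes total%n", flushes, totalBytes);
  }

  private FlushLineSummary() {
  }
}
```

Run over this section it would report, for example, the 7532-byte flush completed in 20ms at sequenceid=182 and the 10760-byte flush completed in 428ms at sequenceid=209.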
2024-11-08T00:38:08,371 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/08a563b83d754cfe9c4feb7c1457eb5f, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/fa7ca6a2f74d46c780264b6871d9df95, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/a0853d9bcc19480ea523da46ef113733] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp, totalSize=114.1 K 2024-11-08T00:38:08,372 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 08a563b83d754cfe9c4feb7c1457eb5f, keycount=73, bloomtype=ROW, size=82.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1731026271602 2024-11-08T00:38:08,372 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting fa7ca6a2f74d46c780264b6871d9df95, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1731026287898 2024-11-08T00:38:08,373 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting a0853d9bcc19480ea523da46ef113733, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1731026287920 2024-11-08T00:38:08,385 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 67bc71518d7e759aa09a87781b7c9b7d#info#compaction#80 average throughput is 32.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:38:08,385 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/b22caf5b9ab34cb199e1df85c57efece is 1080, key is row0062/info:/1731026271602/Put/seqid=0 2024-11-08T00:38:08,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741866_1042 (size=107041) 2024-11-08T00:38:08,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741866_1042 (size=107041) 2024-11-08T00:38:08,393 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/b22caf5b9ab34cb199e1df85c57efece as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b22caf5b9ab34cb199e1df85c57efece 2024-11-08T00:38:08,410 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 67bc71518d7e759aa09a87781b7c9b7d/info of 67bc71518d7e759aa09a87781b7c9b7d into b22caf5b9ab34cb199e1df85c57efece(size=104.5 K), total size for store is 104.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-08T00:38:08,410 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:08,410 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., storeName=67bc71518d7e759aa09a87781b7c9b7d/info, priority=13, startTime=1731026288370; duration=0sec 2024-11-08T00:38:08,411 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:08,411 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 67bc71518d7e759aa09a87781b7c9b7d:info 2024-11-08T00:38:08,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:08,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:09,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:09,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:09,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:09,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:38:09,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/62642f3eb8a54a7baa43fcf285ee3bcc is 1080, key is row0156/info:/1731026287944/Put/seqid=0 2024-11-08T00:38:09,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741867_1043 (size=12516) 2024-11-08T00:38:09,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741867_1043 (size=12516) 2024-11-08T00:38:09,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/62642f3eb8a54a7baa43fcf285ee3bcc 2024-11-08T00:38:09,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/62642f3eb8a54a7baa43fcf285ee3bcc as 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62642f3eb8a54a7baa43fcf285ee3bcc 2024-11-08T00:38:09,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62642f3eb8a54a7baa43fcf285ee3bcc, entries=7, sequenceid=221, filesize=12.2 K 2024-11-08T00:38:09,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 67bc71518d7e759aa09a87781b7c9b7d in 22ms, sequenceid=221, compaction requested=false 2024-11-08T00:38:09,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:09,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:09,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-08T00:38:09,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/26042c4b44e648ea9e4f14ef7cbbff6a is 1080, key is row0163/info:/1731026289960/Put/seqid=0 2024-11-08T00:38:09,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741868_1044 (size=19000) 2024-11-08T00:38:09,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741868_1044 (size=19000) 2024-11-08T00:38:09,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/26042c4b44e648ea9e4f14ef7cbbff6a 2024-11-08T00:38:09,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/26042c4b44e648ea9e4f14ef7cbbff6a as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/26042c4b44e648ea9e4f14ef7cbbff6a 2024-11-08T00:38:10,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/26042c4b44e648ea9e4f14ef7cbbff6a, entries=13, sequenceid=237, filesize=18.6 K 2024-11-08T00:38:10,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 67bc71518d7e759aa09a87781b7c9b7d in 21ms, sequenceid=237, compaction requested=true 2024-11-08T00:38:10,004 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:10,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 67bc71518d7e759aa09a87781b7c9b7d:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:38:10,004 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:38:10,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:10,005 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138557 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:38:10,005 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 67bc71518d7e759aa09a87781b7c9b7d/info is initiating minor compaction (all files) 2024-11-08T00:38:10,005 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 67bc71518d7e759aa09a87781b7c9b7d/info in TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:38:10,005 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b22caf5b9ab34cb199e1df85c57efece, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62642f3eb8a54a7baa43fcf285ee3bcc, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/26042c4b44e648ea9e4f14ef7cbbff6a] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp, totalSize=135.3 K 2024-11-08T00:38:10,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:10,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-08T00:38:10,006 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting b22caf5b9ab34cb199e1df85c57efece, keycount=94, bloomtype=ROW, size=104.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1731026271602 2024-11-08T00:38:10,006 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62642f3eb8a54a7baa43fcf285ee3bcc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1731026287944 2024-11-08T00:38:10,007 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26042c4b44e648ea9e4f14ef7cbbff6a, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731026289960 2024-11-08T00:38:10,009 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/392ba9b0c4fa4bbaaa58a6627cf22c17 is 1080, key is row0176/info:/1731026289984/Put/seqid=0 2024-11-08T00:38:10,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741869_1045 (size=16828) 2024-11-08T00:38:10,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/392ba9b0c4fa4bbaaa58a6627cf22c17 2024-11-08T00:38:10,021 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 67bc71518d7e759aa09a87781b7c9b7d#info#compaction#84 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:38:10,022 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/5a27907c26a94352a90de859b5731df8 is 1080, key is row0062/info:/1731026271602/Put/seqid=0 2024-11-08T00:38:10,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741869_1045 (size=16828) 2024-11-08T00:38:10,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741870_1046 (size=128835) 2024-11-08T00:38:10,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741870_1046 (size=128835) 2024-11-08T00:38:10,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/392ba9b0c4fa4bbaaa58a6627cf22c17 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/392ba9b0c4fa4bbaaa58a6627cf22c17 2024-11-08T00:38:10,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/392ba9b0c4fa4bbaaa58a6627cf22c17, entries=11, sequenceid=251, filesize=16.4 K 2024-11-08T00:38:10,033 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/5a27907c26a94352a90de859b5731df8 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5a27907c26a94352a90de859b5731df8 2024-11-08T00:38:10,033 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=6.30 KB/6456 for 67bc71518d7e759aa09a87781b7c9b7d in 27ms, sequenceid=251, compaction requested=false 2024-11-08T00:38:10,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:10,039 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 67bc71518d7e759aa09a87781b7c9b7d/info of 67bc71518d7e759aa09a87781b7c9b7d into 5a27907c26a94352a90de859b5731df8(size=125.8 K), total size for store is 142.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-08T00:38:10,039 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:10,039 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., storeName=67bc71518d7e759aa09a87781b7c9b7d/info, priority=13, startTime=1731026290004; duration=0sec 2024-11-08T00:38:10,039 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:10,039 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 67bc71518d7e759aa09a87781b7c9b7d:info 2024-11-08T00:38:10,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:10,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:11,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:11,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:12,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:12,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:38:12,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/b7b3612e289d4c6c974318192019a945 is 1080, key is row0187/info:/1731026290007/Put/seqid=0 2024-11-08T00:38:12,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741871_1047 (size=12520) 2024-11-08T00:38:12,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741871_1047 (size=12520) 2024-11-08T00:38:12,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/b7b3612e289d4c6c974318192019a945 2024-11-08T00:38:12,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/b7b3612e289d4c6c974318192019a945 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b7b3612e289d4c6c974318192019a945 2024-11-08T00:38:12,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b7b3612e289d4c6c974318192019a945, entries=7, sequenceid=262, filesize=12.2 K 2024-11-08T00:38:12,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for 67bc71518d7e759aa09a87781b7c9b7d in 24ms, sequenceid=262, compaction requested=true 2024-11-08T00:38:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:12,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 67bc71518d7e759aa09a87781b7c9b7d:info, priority=-2147483648, current under 
compaction store size is 1
2024-11-08T00:38:12,048 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-08T00:38:12,048 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-08T00:38:12,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-11-08T00:38:12,049 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158183 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-08T00:38:12,049 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 67bc71518d7e759aa09a87781b7c9b7d/info is initiating minor compaction (all files)
2024-11-08T00:38:12,049 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 67bc71518d7e759aa09a87781b7c9b7d/info in TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.
2024-11-08T00:38:12,049 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5a27907c26a94352a90de859b5731df8, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/392ba9b0c4fa4bbaaa58a6627cf22c17, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b7b3612e289d4c6c974318192019a945] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp, totalSize=154.5 K
2024-11-08T00:38:12,050 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a27907c26a94352a90de859b5731df8, keycount=114, bloomtype=ROW, size=125.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731026271602
2024-11-08T00:38:12,050 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 392ba9b0c4fa4bbaaa58a6627cf22c17, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731026289984
2024-11-08T00:38:12,050 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7b3612e289d4c6c974318192019a945, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1731026290007
2024-11-08T00:38:12,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/62541ae2dc654418ad1ceddffc797eb0 is 1080, key is row0194/info:/1731026292025/Put/seqid=0
2024-11-08T00:38:12,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741872_1048 (size=15760)
2024-11-08T00:38:12,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741872_1048 (size=15760)
2024-11-08T00:38:12,063 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 67bc71518d7e759aa09a87781b7c9b7d#info#compaction#87 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-08T00:38:12,064 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/cd4a377418ae4f4f9fc62fc1a6c2e8e2 is 1080, key is row0062/info:/1731026271602/Put/seqid=0
2024-11-08T00:38:12,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741873_1049 (size=148418)
2024-11-08T00:38:12,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741873_1049 (size=148418)
2024-11-08T00:38:12,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=67bc71518d7e759aa09a87781b7c9b7d, server=3302f0f507bd,37459,1731026258169
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-08T00:38:12,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:58702 deadline: 1731026302082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=67bc71518d7e759aa09a87781b7c9b7d, server=3302f0f507bd,37459,1731026258169
2024-11-08T00:38:12,084 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., hostname=3302f0f507bd,37459,1731026258169, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., hostname=3302f0f507bd,37459,1731026258169, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=67bc71518d7e759aa09a87781b7c9b7d, server=3302f0f507bd,37459,1731026258169
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-08T00:38:12,084 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., hostname=3302f0f507bd,37459,1731026258169, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=67bc71518d7e759aa09a87781b7c9b7d, server=3302f0f507bd,37459,1731026258169
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-08T00:38:12,084 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., hostname=3302f0f507bd,37459,1731026258169, seqNum=131 because the exception is null or not the one we care about
2024-11-08T00:38:12,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/62541ae2dc654418ad1ceddffc797eb0
2024-11-08T00:38:12,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/62541ae2dc654418ad1ceddffc797eb0 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62541ae2dc654418ad1ceddffc797eb0
2024-11-08T00:38:12,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62541ae2dc654418ad1ceddffc797eb0, entries=10, sequenceid=275, filesize=15.4 K
2024-11-08T00:38:12,474 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/cd4a377418ae4f4f9fc62fc1a6c2e8e2 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/cd4a377418ae4f4f9fc62fc1a6c2e8e2
2024-11-08T00:38:12,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=19.96 KB/20444 for 67bc71518d7e759aa09a87781b7c9b7d in 426ms, sequenceid=275, compaction requested=false
2024-11-08T00:38:12,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d:
2024-11-08T00:38:12,481 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 67bc71518d7e759aa09a87781b7c9b7d/info of 67bc71518d7e759aa09a87781b7c9b7d into cd4a377418ae4f4f9fc62fc1a6c2e8e2(size=144.9 K), total size for store is 160.3 K. This selection was in queue for 0sec, and took 0sec to execute.
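The RegionTooBusyException above is the server-side back-pressure check in HRegion.checkResources: the region's memstore is over its (deliberately small) test limit of 32.0 K while the flush is still in flight, so the Mutate is rejected and the client is expected to retry. A minimal client-side sketch of coping with that signal, using the synchronous Table API rather than the async client the test uses, with hypothetical row, qualifier, value and retry settings, might look like this:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // The test writes rows like row0194 into the single "info" family.
      Put put = new Put(Bytes.toBytes("row0194"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);   // may be rejected while the memstore is over its limit
          break;
        } catch (IOException e) {
          // The busy signal can surface directly or as the cause of a retries-exhausted exception.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt >= 5) {
            throw e;
          }
          Thread.sleep(backoffMs);   // back off and let the flush catch up
          backoffMs *= 2;
        }
      }
    }
  }
}
```

The AsyncRegionLocatorHelper lines above make the same judgement on the client side: the exception does not mean the region moved, so the cached location is kept and the write is simply retried against the same server.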
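The Close-WAL-Writer warnings that follow, repeated roughly once per second for the two old WAL files named in the messages, come from lease recovery on WALs that are being closed: recoverLease is issued and isFileClosed is then polled (reflectively, hence the GeneratedMethodAccessor frames) until the NameNode reports the file closed. Here the DFSClient behind the filesystem handle has already been shut down, so every probe fails with "Filesystem closed" and the loop keeps logging and retrying. A rough sketch of that polling pattern, not the actual RecoverLeaseFSUtils code, with an illustrative timeout and sleep interval:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  /** Poll until the NameNode reports the old WAL closed, or the timeout expires. */
  static boolean recoverWalLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean closed = false;
    while (!closed && System.currentTimeMillis() < deadline) {
      try {
        // Ask the NameNode to recover the previous writer's lease, then check
        // whether the file has actually been closed yet.
        closed = dfs.recoverLease(wal) || dfs.isFileClosed(wal);
      } catch (IOException e) {
        // With the DFSClient already shut down, every probe throws "Filesystem closed";
        // the utility in the log reports it as "Failed invocation for <path>" and tries again.
        System.err.println("Failed invocation for " + wal + ": " + e);
      }
      if (!closed) {
        Thread.sleep(1000L);   // roughly the 1 s cadence visible in the retries below
      }
    }
    return closed;
  }
}
```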
2024-11-08T00:38:12,481 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:12,481 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., storeName=67bc71518d7e759aa09a87781b7c9b7d/info, priority=13, startTime=1731026292048; duration=0sec 2024-11-08T00:38:12,481 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:12,481 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 67bc71518d7e759aa09a87781b7c9b7d:info 2024-11-08T00:38:12,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:12,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:13,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:13,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:14,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:14,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:15,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:15,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:16,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:16,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:17,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:17,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:18,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:18,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:19,335 INFO [master/3302f0f507bd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-08T00:38:19,335 INFO [master/3302f0f507bd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-08T00:38:19,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:19,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:20,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:20,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:21,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:21,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:22,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:22,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-08T00:38:22,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/e82a5b6381cc4f0aa1bd22e45f367861 is 1080, key is row0204/info:/1731026292049/Put/seqid=0 2024-11-08T00:38:22,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741874_1050 (size=26570) 2024-11-08T00:38:22,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741874_1050 (size=26570) 2024-11-08T00:38:22,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/e82a5b6381cc4f0aa1bd22e45f367861 2024-11-08T00:38:22,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/e82a5b6381cc4f0aa1bd22e45f367861 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/e82a5b6381cc4f0aa1bd22e45f367861 2024-11-08T00:38:22,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/e82a5b6381cc4f0aa1bd22e45f367861, entries=20, sequenceid=299, filesize=25.9 K 2024-11-08T00:38:22,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=1.05 KB/1076 for 67bc71518d7e759aa09a87781b7c9b7d in 27ms, sequenceid=299, compaction requested=true 2024-11-08T00:38:22,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:22,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 67bc71518d7e759aa09a87781b7c9b7d:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:38:22,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:22,203 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:38:22,205 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190748 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-08T00:38:22,205 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 67bc71518d7e759aa09a87781b7c9b7d/info is initiating minor compaction (all files) 2024-11-08T00:38:22,205 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 67bc71518d7e759aa09a87781b7c9b7d/info in TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:38:22,205 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/cd4a377418ae4f4f9fc62fc1a6c2e8e2, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62541ae2dc654418ad1ceddffc797eb0, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/e82a5b6381cc4f0aa1bd22e45f367861] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp, totalSize=186.3 K 2024-11-08T00:38:22,206 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd4a377418ae4f4f9fc62fc1a6c2e8e2, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1731026271602 2024-11-08T00:38:22,206 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 62541ae2dc654418ad1ceddffc797eb0, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731026292025 2024-11-08T00:38:22,207 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting e82a5b6381cc4f0aa1bd22e45f367861, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731026292049 2024-11-08T00:38:22,221 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 67bc71518d7e759aa09a87781b7c9b7d#info#compaction#89 average throughput is 41.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:38:22,222 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/74e78d24b7a04b9ba2404e0733699ad0 is 1080, key is row0062/info:/1731026271602/Put/seqid=0 2024-11-08T00:38:22,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741875_1051 (size=180898) 2024-11-08T00:38:22,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741875_1051 (size=180898) 2024-11-08T00:38:22,229 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/74e78d24b7a04b9ba2404e0733699ad0 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/74e78d24b7a04b9ba2404e0733699ad0 2024-11-08T00:38:22,236 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 67bc71518d7e759aa09a87781b7c9b7d/info of 67bc71518d7e759aa09a87781b7c9b7d into 74e78d24b7a04b9ba2404e0733699ad0(size=176.7 K), total size for store is 176.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-08T00:38:22,236 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:22,236 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., storeName=67bc71518d7e759aa09a87781b7c9b7d/info, priority=13, startTime=1731026302203; duration=0sec 2024-11-08T00:38:22,236 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:22,236 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 67bc71518d7e759aa09a87781b7c9b7d:info 2024-11-08T00:38:22,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:22,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:23,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:23,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:24,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:24,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-08T00:38:24,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/c46f6a995f784350a8ca4e40229fd1a3 is 1080, key is row0224/info:/1731026302179/Put/seqid=0 2024-11-08T00:38:24,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741876_1052 (size=12523) 2024-11-08T00:38:24,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741876_1052 (size=12523) 2024-11-08T00:38:24,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/c46f6a995f784350a8ca4e40229fd1a3 2024-11-08T00:38:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/c46f6a995f784350a8ca4e40229fd1a3 as 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c46f6a995f784350a8ca4e40229fd1a3 2024-11-08T00:38:24,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c46f6a995f784350a8ca4e40229fd1a3, entries=7, sequenceid=310, filesize=12.2 K 2024-11-08T00:38:24,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 67bc71518d7e759aa09a87781b7c9b7d in 25ms, sequenceid=310, compaction requested=false 2024-11-08T00:38:24,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:24,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:24,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-08T00:38:24,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-08T00:38:24,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/1fa99b00b42a4bf1a84836dfdb003436 is 1080, key is row0231/info:/1731026304203/Put/seqid=0 2024-11-08T00:38:24,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741877_1053 (size=16839) 2024-11-08T00:38:24,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741877_1053 (size=16839) 2024-11-08T00:38:24,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/1fa99b00b42a4bf1a84836dfdb003436 2024-11-08T00:38:24,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/1fa99b00b42a4bf1a84836dfdb003436 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/1fa99b00b42a4bf1a84836dfdb003436 2024-11-08T00:38:24,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/1fa99b00b42a4bf1a84836dfdb003436, entries=11, sequenceid=324, filesize=16.4 K 2024-11-08T00:38:24,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 
KB/12912, currentSize=11.56 KB/11836 for 67bc71518d7e759aa09a87781b7c9b7d in 21ms, sequenceid=324, compaction requested=true 2024-11-08T00:38:24,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:24,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 67bc71518d7e759aa09a87781b7c9b7d:info, priority=-2147483648, current under compaction store size is 1 2024-11-08T00:38:24,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:24,250 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-08T00:38:24,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37459 {}] regionserver.HRegion(8855): Flush requested on 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:24,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-08T00:38:24,250 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 210260 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-08T00:38:24,251 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1541): 67bc71518d7e759aa09a87781b7c9b7d/info is initiating minor compaction (all files) 2024-11-08T00:38:24,251 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 67bc71518d7e759aa09a87781b7c9b7d/info in TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 
2024-11-08T00:38:24,251 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/74e78d24b7a04b9ba2404e0733699ad0, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c46f6a995f784350a8ca4e40229fd1a3, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/1fa99b00b42a4bf1a84836dfdb003436] into tmpdir=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp, totalSize=205.3 K 2024-11-08T00:38:24,251 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 74e78d24b7a04b9ba2404e0733699ad0, keycount=162, bloomtype=ROW, size=176.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731026271602 2024-11-08T00:38:24,252 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting c46f6a995f784350a8ca4e40229fd1a3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1731026302179 2024-11-08T00:38:24,252 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1fa99b00b42a4bf1a84836dfdb003436, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1731026304203 2024-11-08T00:38:24,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/6c7876f643b54d7d8dbb4f148b07d567 is 1080, key is row0242/info:/1731026304229/Put/seqid=0 2024-11-08T00:38:24,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741878_1054 (size=17918) 2024-11-08T00:38:24,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741878_1054 (size=17918) 2024-11-08T00:38:24,267 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 67bc71518d7e759aa09a87781b7c9b7d#info#compaction#93 average throughput is 61.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-08T00:38:24,268 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/5bb65327627d49229f270e60cd8cec52 is 1080, key is row0062/info:/1731026271602/Put/seqid=0 2024-11-08T00:38:24,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/6c7876f643b54d7d8dbb4f148b07d567 2024-11-08T00:38:24,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/6c7876f643b54d7d8dbb4f148b07d567 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/6c7876f643b54d7d8dbb4f148b07d567 2024-11-08T00:38:24,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741879_1055 (size=200426) 2024-11-08T00:38:24,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741879_1055 (size=200426) 2024-11-08T00:38:24,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/6c7876f643b54d7d8dbb4f148b07d567, entries=12, sequenceid=339, filesize=17.5 K 2024-11-08T00:38:24,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for 67bc71518d7e759aa09a87781b7c9b7d in 29ms, sequenceid=339, compaction requested=false 2024-11-08T00:38:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:24,279 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/5bb65327627d49229f270e60cd8cec52 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5bb65327627d49229f270e60cd8cec52 2024-11-08T00:38:24,285 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 67bc71518d7e759aa09a87781b7c9b7d/info of 67bc71518d7e759aa09a87781b7c9b7d into 5bb65327627d49229f270e60cd8cec52(size=195.7 K), total size for store is 213.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-08T00:38:24,285 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:24,285 INFO [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., storeName=67bc71518d7e759aa09a87781b7c9b7d/info, priority=13, startTime=1731026304249; duration=0sec 2024-11-08T00:38:24,285 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-08T00:38:24,285 DEBUG [RS:0;3302f0f507bd:37459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 67bc71518d7e759aa09a87781b7c9b7d:info 2024-11-08T00:38:24,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:24,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:25,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:25,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:26,257 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-08T00:38:26,258 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C37459%2C1731026258169.1731026306258 2024-11-08T00:38:26,270 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,270 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,270 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,270 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,271 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,271 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.1731026258789 with entries=320, filesize=311.00 KB; new WAL /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.1731026306258 2024-11-08T00:38:26,272 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33457:33457),(127.0.0.1/127.0.0.1:34681:34681)] 2024-11-08T00:38:26,272 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.1731026258789 is not closed yet, will try archiving it next time 2024-11-08T00:38:26,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741833_1009 (size=318475) 2024-11-08T00:38:26,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741833_1009 (size=318475) 2024-11-08T00:38:26,279 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-08T00:38:26,284 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/info/9b43305d5b094aec9647dbb2b7ac3ac9 is 193, key is TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d./info:regioninfo/1731026274419/Put/seqid=0 2024-11-08T00:38:26,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741881_1057 (size=6223) 2024-11-08T00:38:26,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741881_1057 (size=6223) 2024-11-08T00:38:26,288 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/info/9b43305d5b094aec9647dbb2b7ac3ac9 2024-11-08T00:38:26,292 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/.tmp/info/9b43305d5b094aec9647dbb2b7ac3ac9 as 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/info/9b43305d5b094aec9647dbb2b7ac3ac9 2024-11-08T00:38:26,298 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/info/9b43305d5b094aec9647dbb2b7ac3ac9, entries=5, sequenceid=21, filesize=6.1 K 2024-11-08T00:38:26,299 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-08T00:38:26,299 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-08T00:38:26,299 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 67bc71518d7e759aa09a87781b7c9b7d 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-08T00:38:26,303 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/f64b18dd123a4d2bb001963ed75bbcd9 is 1080, key is row0254/info:/1731026304251/Put/seqid=0 2024-11-08T00:38:26,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741882_1058 (size=8199) 2024-11-08T00:38:26,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741882_1058 (size=8199) 2024-11-08T00:38:26,307 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/f64b18dd123a4d2bb001963ed75bbcd9 2024-11-08T00:38:26,311 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/.tmp/info/f64b18dd123a4d2bb001963ed75bbcd9 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/f64b18dd123a4d2bb001963ed75bbcd9 2024-11-08T00:38:26,315 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/f64b18dd123a4d2bb001963ed75bbcd9, entries=3, sequenceid=346, filesize=8.0 K 2024-11-08T00:38:26,316 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 67bc71518d7e759aa09a87781b7c9b7d in 17ms, sequenceid=346, compaction requested=true 2024-11-08T00:38:26,316 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 67bc71518d7e759aa09a87781b7c9b7d: 2024-11-08T00:38:26,316 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 21fe38dceb9bbbd8caa13dbb69e57a46: 2024-11-08T00:38:26,317 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
3302f0f507bd%2C37459%2C1731026258169.1731026306316 2024-11-08T00:38:26,321 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,321 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,321 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,321 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,321 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.1731026306258 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.1731026306316 2024-11-08T00:38:26,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741880_1056 (size=731) 2024-11-08T00:38:26,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741880_1056 (size=731) 2024-11-08T00:38:26,328 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.1731026258789 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/oldWALs/3302f0f507bd%2C37459%2C1731026258169.1731026258789 2024-11-08T00:38:26,329 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34681:34681),(127.0.0.1/127.0.0.1:33457:33457)] 2024-11-08T00:38:26,329 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T00:38:26,329 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T00:38:26,329 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/WALs/3302f0f507bd,37459,1731026258169/3302f0f507bd%2C37459%2C1731026258169.1731026306258 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/oldWALs/3302f0f507bd%2C37459%2C1731026258169.1731026306258 2024-11-08T00:38:26,330 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-08T00:38:26,330 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:38:26,330 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:26,330 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:26,330 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T00:38:26,330 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=968579626, stopped=false 2024-11-08T00:38:26,330 
INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T00:38:26,330 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3302f0f507bd,46443,1731026257983 2024-11-08T00:38:26,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:38:26,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:38:26,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:26,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:26,385 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:38:26,385 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T00:38:26,385 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:38:26,385 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:26,385 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3302f0f507bd,37459,1731026258169' ***** 2024-11-08T00:38:26,385 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:38:26,385 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:38:26,385 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:38:26,386 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:38:26,386 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:38:26,386 INFO [RS:0;3302f0f507bd:37459 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:38:26,386 INFO [RS:0;3302f0f507bd:37459 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-08T00:38:26,386 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(3091): Received CLOSE for 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:26,386 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(3091): Received CLOSE for 21fe38dceb9bbbd8caa13dbb69e57a46 2024-11-08T00:38:26,386 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(959): stopping server 3302f0f507bd,37459,1731026258169 2024-11-08T00:38:26,386 INFO [RS:0;3302f0f507bd:37459 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:38:26,386 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 67bc71518d7e759aa09a87781b7c9b7d, disabling compactions & flushes 2024-11-08T00:38:26,386 INFO [RS:0;3302f0f507bd:37459 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3302f0f507bd:37459. 2024-11-08T00:38:26,386 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:38:26,387 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:38:26,387 DEBUG [RS:0;3302f0f507bd:37459 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:38:26,387 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. after waiting 0 ms 2024-11-08T00:38:26,387 DEBUG [RS:0;3302f0f507bd:37459 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:26,387 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 
2024-11-08T00:38:26,387 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T00:38:26,387 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:38:26,387 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T00:38:26,387 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T00:38:26,387 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-08T00:38:26,387 DEBUG [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 67bc71518d7e759aa09a87781b7c9b7d=TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d., 21fe38dceb9bbbd8caa13dbb69e57a46=TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46.} 2024-11-08T00:38:26,387 DEBUG [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 21fe38dceb9bbbd8caa13dbb69e57a46, 67bc71518d7e759aa09a87781b7c9b7d 2024-11-08T00:38:26,387 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:38:26,387 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:38:26,387 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:38:26,387 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:38:26,387 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:38:26,387 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420->hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60-top, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-7a53a813528c40669e5144480a39fd8a, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/ec7860c3f7a34c169486472cc402a10d, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-e45604636afb46ed9fcfe82f0d6c579d, 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d36b088c61047498c14861d4efae21a, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/bf4e8d1caa3d4974a673090d869514e3, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c646d1b977b249ada09be6ff745e0b3f, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/2088b56bbf524b679d2ef8da6103e683, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/08a563b83d754cfe9c4feb7c1457eb5f, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5f9603a4633540188581d4c34a7ac7ca, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/fa7ca6a2f74d46c780264b6871d9df95, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b22caf5b9ab34cb199e1df85c57efece, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/a0853d9bcc19480ea523da46ef113733, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62642f3eb8a54a7baa43fcf285ee3bcc, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5a27907c26a94352a90de859b5731df8, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/26042c4b44e648ea9e4f14ef7cbbff6a, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/392ba9b0c4fa4bbaaa58a6627cf22c17, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/cd4a377418ae4f4f9fc62fc1a6c2e8e2, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b7b3612e289d4c6c974318192019a945, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62541ae2dc654418ad1ceddffc797eb0, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/74e78d24b7a04b9ba2404e0733699ad0, 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/e82a5b6381cc4f0aa1bd22e45f367861, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c46f6a995f784350a8ca4e40229fd1a3, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/1fa99b00b42a4bf1a84836dfdb003436] to archive 2024-11-08T00:38:26,388 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-08T00:38:26,390 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:38:26,391 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-7a53a813528c40669e5144480a39fd8a to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-7a53a813528c40669e5144480a39fd8a 2024-11-08T00:38:26,393 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/ec7860c3f7a34c169486472cc402a10d to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/ec7860c3f7a34c169486472cc402a10d 2024-11-08T00:38:26,393 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-08T00:38:26,394 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:38:26,394 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:38:26,394 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026306387Running coprocessor pre-close hooks at 1731026306387Disabling compacts and flushes for region at 1731026306387Disabling writes for close at 1731026306387Writing region close event to WAL at 1731026306390 (+3 ms)Running coprocessor post-close hooks at 1731026306394 (+4 ms)Closed at 1731026306394 2024-11-08T00:38:26,394 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-e45604636afb46ed9fcfe82f0d6c579d to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/TestLogRolling-testLogRolling=5e8733a307b5de0ca2decb5cd91d5420-e45604636afb46ed9fcfe82f0d6c579d 2024-11-08T00:38:26,394 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T00:38:26,395 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d36b088c61047498c14861d4efae21a to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/3d36b088c61047498c14861d4efae21a 2024-11-08T00:38:26,396 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/bf4e8d1caa3d4974a673090d869514e3 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/bf4e8d1caa3d4974a673090d869514e3 2024-11-08T00:38:26,397 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c646d1b977b249ada09be6ff745e0b3f to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c646d1b977b249ada09be6ff745e0b3f 2024-11-08T00:38:26,398 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/2088b56bbf524b679d2ef8da6103e683 to 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/2088b56bbf524b679d2ef8da6103e683 2024-11-08T00:38:26,399 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/08a563b83d754cfe9c4feb7c1457eb5f to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/08a563b83d754cfe9c4feb7c1457eb5f 2024-11-08T00:38:26,400 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5f9603a4633540188581d4c34a7ac7ca to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5f9603a4633540188581d4c34a7ac7ca 2024-11-08T00:38:26,401 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/fa7ca6a2f74d46c780264b6871d9df95 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/fa7ca6a2f74d46c780264b6871d9df95 2024-11-08T00:38:26,402 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b22caf5b9ab34cb199e1df85c57efece to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b22caf5b9ab34cb199e1df85c57efece 2024-11-08T00:38:26,403 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/a0853d9bcc19480ea523da46ef113733 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/a0853d9bcc19480ea523da46ef113733 2024-11-08T00:38:26,403 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62642f3eb8a54a7baa43fcf285ee3bcc to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62642f3eb8a54a7baa43fcf285ee3bcc 2024-11-08T00:38:26,404 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5a27907c26a94352a90de859b5731df8 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/5a27907c26a94352a90de859b5731df8 2024-11-08T00:38:26,405 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/26042c4b44e648ea9e4f14ef7cbbff6a to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/26042c4b44e648ea9e4f14ef7cbbff6a 2024-11-08T00:38:26,406 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/392ba9b0c4fa4bbaaa58a6627cf22c17 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/392ba9b0c4fa4bbaaa58a6627cf22c17 2024-11-08T00:38:26,406 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/cd4a377418ae4f4f9fc62fc1a6c2e8e2 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/cd4a377418ae4f4f9fc62fc1a6c2e8e2 2024-11-08T00:38:26,407 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b7b3612e289d4c6c974318192019a945 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/b7b3612e289d4c6c974318192019a945 2024-11-08T00:38:26,408 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62541ae2dc654418ad1ceddffc797eb0 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/62541ae2dc654418ad1ceddffc797eb0 2024-11-08T00:38:26,409 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/74e78d24b7a04b9ba2404e0733699ad0 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/74e78d24b7a04b9ba2404e0733699ad0 2024-11-08T00:38:26,409 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/e82a5b6381cc4f0aa1bd22e45f367861 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/e82a5b6381cc4f0aa1bd22e45f367861 2024-11-08T00:38:26,410 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c46f6a995f784350a8ca4e40229fd1a3 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/c46f6a995f784350a8ca4e40229fd1a3 2024-11-08T00:38:26,411 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/1fa99b00b42a4bf1a84836dfdb003436 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/info/1fa99b00b42a4bf1a84836dfdb003436 2024-11-08T00:38:26,411 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3302f0f507bd:46443 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-08T00:38:26,412 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ec7860c3f7a34c169486472cc402a10d=42984, 3d36b088c61047498c14861d4efae21a=12516, bf4e8d1caa3d4974a673090d869514e3=63636, c646d1b977b249ada09be6ff745e0b3f=17906, 2088b56bbf524b679d2ef8da6103e683=17906, 08a563b83d754cfe9c4feb7c1457eb5f=84293, 5f9603a4633540188581d4c34a7ac7ca=12516, fa7ca6a2f74d46c780264b6871d9df95=16828, b22caf5b9ab34cb199e1df85c57efece=107041, a0853d9bcc19480ea523da46ef113733=15750, 62642f3eb8a54a7baa43fcf285ee3bcc=12516, 5a27907c26a94352a90de859b5731df8=128835, 26042c4b44e648ea9e4f14ef7cbbff6a=19000, 392ba9b0c4fa4bbaaa58a6627cf22c17=16828, cd4a377418ae4f4f9fc62fc1a6c2e8e2=148418, b7b3612e289d4c6c974318192019a945=12520, 62541ae2dc654418ad1ceddffc797eb0=15760, 74e78d24b7a04b9ba2404e0733699ad0=180898, e82a5b6381cc4f0aa1bd22e45f367861=26570, c46f6a995f784350a8ca4e40229fd1a3=12523, 1fa99b00b42a4bf1a84836dfdb003436=16839] 2024-11-08T00:38:26,415 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/67bc71518d7e759aa09a87781b7c9b7d/recovered.edits/349.seqid, newMaxSeqId=349, maxSeqId=130 2024-11-08T00:38:26,415 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:38:26,415 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 67bc71518d7e759aa09a87781b7c9b7d: Waiting for close lock at 1731026306386Running coprocessor pre-close hooks at 1731026306386Disabling compacts and flushes for region at 1731026306386Disabling writes for close at 1731026306387 (+1 ms)Writing region close event to WAL at 1731026306412 (+25 ms)Running coprocessor post-close hooks at 1731026306415 (+3 ms)Closed at 1731026306415 2024-11-08T00:38:26,415 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731026273707.67bc71518d7e759aa09a87781b7c9b7d. 2024-11-08T00:38:26,416 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 21fe38dceb9bbbd8caa13dbb69e57a46, disabling compactions & flushes 2024-11-08T00:38:26,416 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:38:26,416 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:38:26,416 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 
after waiting 0 ms 2024-11-08T00:38:26,416 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:38:26,416 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420->hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/5e8733a307b5de0ca2decb5cd91d5420/info/3d891a0fe3384a94ac90209c3eba3c60-bottom] to archive 2024-11-08T00:38:26,417 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-08T00:38:26,418 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420 to hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/archive/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/info/3d891a0fe3384a94ac90209c3eba3c60.5e8733a307b5de0ca2decb5cd91d5420 2024-11-08T00:38:26,418 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-08T00:38:26,421 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/data/default/TestLogRolling-testLogRolling/21fe38dceb9bbbd8caa13dbb69e57a46/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-08T00:38:26,421 INFO [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:38:26,421 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 21fe38dceb9bbbd8caa13dbb69e57a46: Waiting for close lock at 1731026306416Running coprocessor pre-close hooks at 1731026306416Disabling compacts and flushes for region at 1731026306416Disabling writes for close at 1731026306416Writing region close event to WAL at 1731026306418 (+2 ms)Running coprocessor post-close hooks at 1731026306421 (+3 ms)Closed at 1731026306421 2024-11-08T00:38:26,421 DEBUG [RS_CLOSE_REGION-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731026273707.21fe38dceb9bbbd8caa13dbb69e57a46. 2024-11-08T00:38:26,587 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(976): stopping server 3302f0f507bd,37459,1731026258169; all regions closed. 
2024-11-08T00:38:26,589 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,589 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,589 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,590 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,590 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741834_1010 (size=8107) 2024-11-08T00:38:26,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741834_1010 (size=8107) 2024-11-08T00:38:26,599 DEBUG [RS:0;3302f0f507bd:37459 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/oldWALs 2024-11-08T00:38:26,599 INFO [RS:0;3302f0f507bd:37459 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C37459%2C1731026258169.meta:.meta(num 1731026259223) 2024-11-08T00:38:26,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,600 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,600 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,600 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:26,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741883_1059 (size=780) 2024-11-08T00:38:26,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741883_1059 (size=780) 2024-11-08T00:38:26,605 DEBUG [RS:0;3302f0f507bd:37459 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/oldWALs 2024-11-08T00:38:26,605 INFO [RS:0;3302f0f507bd:37459 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C37459%2C1731026258169:(num 1731026306316) 2024-11-08T00:38:26,605 DEBUG [RS:0;3302f0f507bd:37459 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:26,605 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:38:26,605 INFO [RS:0;3302f0f507bd:37459 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:38:26,605 INFO [RS:0;3302f0f507bd:37459 {}] hbase.ChoreService(370): Chore service for: regionserver/3302f0f507bd:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T00:38:26,605 INFO [RS:0;3302f0f507bd:37459 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:38:26,606 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T00:38:26,606 INFO [RS:0;3302f0f507bd:37459 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37459 2024-11-08T00:38:26,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:38:26,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3302f0f507bd,37459,1731026258169 2024-11-08T00:38:26,630 INFO [RS:0;3302f0f507bd:37459 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:38:26,640 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3302f0f507bd,37459,1731026258169] 2024-11-08T00:38:26,650 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3302f0f507bd,37459,1731026258169 already deleted, retry=false 2024-11-08T00:38:26,650 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3302f0f507bd,37459,1731026258169 expired; onlineServers=0 2024-11-08T00:38:26,650 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3302f0f507bd,46443,1731026257983' ***** 2024-11-08T00:38:26,650 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T00:38:26,651 INFO [M:0;3302f0f507bd:46443 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:38:26,651 INFO [M:0;3302f0f507bd:46443 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:38:26,651 DEBUG [M:0;3302f0f507bd:46443 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T00:38:26,651 DEBUG [M:0;3302f0f507bd:46443 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T00:38:26,651 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-08T00:38:26,651 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026258505 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026258505,5,FailOnTimeoutGroup] 2024-11-08T00:38:26,651 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026258506 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026258506,5,FailOnTimeoutGroup] 2024-11-08T00:38:26,651 INFO [M:0;3302f0f507bd:46443 {}] hbase.ChoreService(370): Chore service for: master/3302f0f507bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T00:38:26,651 INFO [M:0;3302f0f507bd:46443 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:38:26,651 DEBUG [M:0;3302f0f507bd:46443 {}] master.HMaster(1795): Stopping service threads 2024-11-08T00:38:26,651 INFO [M:0;3302f0f507bd:46443 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T00:38:26,651 INFO [M:0;3302f0f507bd:46443 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:38:26,651 INFO [M:0;3302f0f507bd:46443 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T00:38:26,651 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T00:38:26,661 INFO [regionserver/3302f0f507bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:38:26,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T00:38:26,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:26,661 DEBUG [M:0;3302f0f507bd:46443 {}] zookeeper.ZKUtil(347): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T00:38:26,661 WARN [M:0;3302f0f507bd:46443 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T00:38:26,662 INFO [M:0;3302f0f507bd:46443 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/.lastflushedseqids 2024-11-08T00:38:26,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741884_1060 (size=228) 2024-11-08T00:38:26,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741884_1060 (size=228) 2024-11-08T00:38:26,667 INFO [M:0;3302f0f507bd:46443 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T00:38:26,667 INFO [M:0;3302f0f507bd:46443 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T00:38:26,667 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:38:26,667 INFO [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:26,667 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:26,667 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:38:26,668 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:26,668 INFO [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.38 KB 2024-11-08T00:38:26,681 DEBUG [M:0;3302f0f507bd:46443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed2059e73fc6485eb7f90a61d3860df8 is 82, key is hbase:meta,,1/info:regioninfo/1731026259245/Put/seqid=0 2024-11-08T00:38:26,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741885_1061 (size=5672) 2024-11-08T00:38:26,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741885_1061 (size=5672) 2024-11-08T00:38:26,686 INFO [M:0;3302f0f507bd:46443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed2059e73fc6485eb7f90a61d3860df8 2024-11-08T00:38:26,707 DEBUG [M:0;3302f0f507bd:46443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a832761c475c44c4b6f7d6e54e1a6768 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731026259800/Put/seqid=0 2024-11-08T00:38:26,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741886_1062 (size=7090) 2024-11-08T00:38:26,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741886_1062 (size=7090) 2024-11-08T00:38:26,712 INFO [M:0;3302f0f507bd:46443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a832761c475c44c4b6f7d6e54e1a6768 2024-11-08T00:38:26,716 INFO [M:0;3302f0f507bd:46443 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a832761c475c44c4b6f7d6e54e1a6768 2024-11-08T00:38:26,729 DEBUG [M:0;3302f0f507bd:46443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48b410cb10ff45bba1b87f1c0b7bc096 is 69, key is 3302f0f507bd,37459,1731026258169/rs:state/1731026258639/Put/seqid=0 2024-11-08T00:38:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741887_1063 (size=5156) 2024-11-08T00:38:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741887_1063 (size=5156) 2024-11-08T00:38:26,734 INFO [M:0;3302f0f507bd:46443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48b410cb10ff45bba1b87f1c0b7bc096 2024-11-08T00:38:26,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:38:26,740 INFO [RS:0;3302f0f507bd:37459 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:38:26,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37459-0x10117e083db0001, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:38:26,740 INFO [RS:0;3302f0f507bd:37459 {}] regionserver.HRegionServer(1031): Exiting; stopping=3302f0f507bd,37459,1731026258169; zookeeper connection closed. 2024-11-08T00:38:26,740 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1880fe31 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1880fe31 2024-11-08T00:38:26,741 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-08T00:38:26,751 DEBUG [M:0;3302f0f507bd:46443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c0d911b771a8402e87948996f342e136 is 52, key is load_balancer_on/state:d/1731026259413/Put/seqid=0 2024-11-08T00:38:26,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741888_1064 (size=5056) 2024-11-08T00:38:26,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741888_1064 (size=5056) 2024-11-08T00:38:26,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:26,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:27,158 INFO [M:0;3302f0f507bd:46443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c0d911b771a8402e87948996f342e136 2024-11-08T00:38:27,168 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed2059e73fc6485eb7f90a61d3860df8 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ed2059e73fc6485eb7f90a61d3860df8 2024-11-08T00:38:27,173 INFO [M:0;3302f0f507bd:46443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ed2059e73fc6485eb7f90a61d3860df8, entries=8, sequenceid=125, filesize=5.5 K 2024-11-08T00:38:27,173 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a832761c475c44c4b6f7d6e54e1a6768 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a832761c475c44c4b6f7d6e54e1a6768 2024-11-08T00:38:27,177 INFO [M:0;3302f0f507bd:46443 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a832761c475c44c4b6f7d6e54e1a6768 2024-11-08T00:38:27,177 INFO [M:0;3302f0f507bd:46443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a832761c475c44c4b6f7d6e54e1a6768, entries=13, sequenceid=125, filesize=6.9 K 2024-11-08T00:38:27,178 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48b410cb10ff45bba1b87f1c0b7bc096 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/48b410cb10ff45bba1b87f1c0b7bc096 2024-11-08T00:38:27,182 INFO [M:0;3302f0f507bd:46443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/48b410cb10ff45bba1b87f1c0b7bc096, entries=1, sequenceid=125, filesize=5.0 K 2024-11-08T00:38:27,183 DEBUG [M:0;3302f0f507bd:46443 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c0d911b771a8402e87948996f342e136 as hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c0d911b771a8402e87948996f342e136 2024-11-08T00:38:27,187 INFO [M:0;3302f0f507bd:46443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34233/user/jenkins/test-data/9e5fc896-c898-d283-5460-61349e643797/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c0d911b771a8402e87948996f342e136, entries=1, sequenceid=125, filesize=4.9 K 2024-11-08T00:38:27,188 INFO [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 520ms, sequenceid=125, compaction requested=false 2024-11-08T00:38:27,189 INFO [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:27,189 DEBUG [M:0;3302f0f507bd:46443 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026306667Disabling compacts and flushes for region at 1731026306667Disabling writes for close at 1731026306668 (+1 ms)Obtaining lock to block concurrent updates at 1731026306668Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731026306668Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64840, getOffHeapSize=0, getCellsCount=148 at 1731026306668Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731026306669 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731026306669Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731026306681 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731026306681Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731026306690 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731026306707 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731026306707Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731026306716 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731026306729 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731026306729Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731026306738 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731026306751 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731026306751Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ed37317: reopening flushed file at 1731026307167 (+416 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@145e5ed: reopening flushed file at 1731026307173 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3177cfb1: reopening flushed file at 1731026307178 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4599da9b: reopening flushed file at 1731026307183 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 520ms, sequenceid=125, compaction requested=false at 1731026307188 (+5 ms)Writing region close event to WAL at 1731026307189 (+1 ms)Closed at 1731026307189 2024-11-08T00:38:27,189 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:27,190 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:27,190 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:27,190 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:27,190 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:27,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33277 is added to blk_1073741830_1006 (size=61332) 2024-11-08T00:38:27,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43605 is added to blk_1073741830_1006 (size=61332) 2024-11-08T00:38:27,192 INFO [M:0;3302f0f507bd:46443 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-08T00:38:27,192 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T00:38:27,192 INFO [M:0;3302f0f507bd:46443 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46443 2024-11-08T00:38:27,192 INFO [M:0;3302f0f507bd:46443 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:38:27,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:38:27,338 INFO [M:0;3302f0f507bd:46443 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:38:27,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46443-0x10117e083db0000, quorum=127.0.0.1:51204, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:38:27,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@42c5c09{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:38:27,345 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ce0a24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:38:27,345 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:38:27,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f61588{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:38:27,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13ef5561{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.log.dir/,STOPPED} 2024-11-08T00:38:27,349 WARN [BP-414991167-172.17.0.3-1731026255478 heartbeating to localhost/127.0.0.1:34233 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:38:27,349 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:38:27,349 WARN [BP-414991167-172.17.0.3-1731026255478 heartbeating to localhost/127.0.0.1:34233 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-414991167-172.17.0.3-1731026255478 (Datanode Uuid bf7e85fc-51ed-4bc8-98cd-c3e459598d95) service to localhost/127.0.0.1:34233 2024-11-08T00:38:27,349 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:38:27,350 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/data/data3/current/BP-414991167-172.17.0.3-1731026255478 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:38:27,350 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/data/data4/current/BP-414991167-172.17.0.3-1731026255478 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:38:27,350 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:38:27,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a6e8e46{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:38:27,352 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1204fb24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:38:27,352 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:38:27,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f424370{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:38:27,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2eb912ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.log.dir/,STOPPED} 2024-11-08T00:38:27,353 WARN [BP-414991167-172.17.0.3-1731026255478 heartbeating to localhost/127.0.0.1:34233 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:38:27,353 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:38:27,353 WARN [BP-414991167-172.17.0.3-1731026255478 heartbeating to localhost/127.0.0.1:34233 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-414991167-172.17.0.3-1731026255478 (Datanode Uuid 4bcf6621-59ec-4f2b-9b89-673e26f6a24c) service to localhost/127.0.0.1:34233 2024-11-08T00:38:27,353 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:38:27,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/data/data1/current/BP-414991167-172.17.0.3-1731026255478 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:38:27,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/cluster_f4f62a24-dad1-52fc-3311-356e57af5aa6/data/data2/current/BP-414991167-172.17.0.3-1731026255478 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:38:27,354 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:38:27,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ff7780b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:38:27,359 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20d96a0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:38:27,359 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:38:27,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@425d5d71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:38:27,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@314e7370{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.log.dir/,STOPPED} 2024-11-08T00:38:27,365 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T00:38:27,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T00:38:27,405 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=233 (was 210) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:34233 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34233 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34233 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34233 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34233 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:34233 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34233 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34233 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=509 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=123 (was 154), ProcessCount=11 (was 11), AvailableMemoryMB=6714 (was 6767)
2024-11-08T00:38:27,412 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=233, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=123, ProcessCount=11, AvailableMemoryMB=6714
2024-11-08T00:38:27,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-08T00:38:27,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.log.dir so I do NOT create it in target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b
2024-11-08T00:38:27,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fc5b3bf-9aae-8b07-e2fd-d7e35ce18818/hadoop.tmp.dir so I do NOT create it in target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b
2024-11-08T00:38:27,412 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25, deleteOnExit=true
2024-11-08T00:38:27,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/test.cache.data in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/hadoop.tmp.dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/hadoop.log.dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-08T00:38:27,413 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-08T00:38:27,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/nfs.dump.dir in system properties and HBase conf
2024-11-08T00:38:27,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/java.io.tmpdir in system properties and HBase conf 2024-11-08T00:38:27,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T00:38:27,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T00:38:27,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T00:38:27,425 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:38:27,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:27,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:27,881 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:38:27,884 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:38:27,885 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:38:27,885 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:38:27,885 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:38:27,886 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:38:27,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c4c959a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:38:27,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@372f7d77{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:38:27,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17951be7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/java.io.tmpdir/jetty-localhost-40167-hadoop-hdfs-3_4_1-tests_jar-_-any-16308513137010524432/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:38:27,978 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e9b8f9f{HTTP/1.1, (http/1.1)}{localhost:40167} 2024-11-08T00:38:27,978 INFO [Time-limited test {}] server.Server(415): Started @302075ms 2024-11-08T00:38:27,988 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-08T00:38:28,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:38:28,319 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:38:28,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:38:28,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:38:28,319 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T00:38:28,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f638c2a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:38:28,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68813b82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:38:28,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44402286{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/java.io.tmpdir/jetty-localhost-34725-hadoop-hdfs-3_4_1-tests_jar-_-any-7728824528712588273/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:38:28,413 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2753102b{HTTP/1.1, (http/1.1)}{localhost:34725} 2024-11-08T00:38:28,413 INFO [Time-limited test {}] server.Server(415): Started @302510ms 2024-11-08T00:38:28,414 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:38:28,438 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T00:38:28,441 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T00:38:28,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T00:38:28,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T00:38:28,442 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T00:38:28,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59a6b271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/hadoop.log.dir/,AVAILABLE} 2024-11-08T00:38:28,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5991282a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T00:38:28,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e7eee62{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/java.io.tmpdir/jetty-localhost-46383-hadoop-hdfs-3_4_1-tests_jar-_-any-4821665357532414951/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:38:28,536 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43bb2f4c{HTTP/1.1, (http/1.1)}{localhost:46383} 2024-11-08T00:38:28,536 INFO [Time-limited test {}] server.Server(415): Started @302633ms 2024-11-08T00:38:28,537 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T00:38:28,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:28,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:29,718 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/data/data1/current/BP-1714520190-172.17.0.3-1731026307427/current, will proceed with Du for space computation calculation, 2024-11-08T00:38:29,719 WARN [Thread-2514 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/data/data2/current/BP-1714520190-172.17.0.3-1731026307427/current, will proceed with Du for space computation calculation, 2024-11-08T00:38:29,733 WARN [Thread-2477 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T00:38:29,735 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdaf78c167fe53f66 with lease ID 0xc56f26338c39ca22: Processing first storage report for DS-7c716083-e186-48b6-9f3e-4e37090d3dc3 from datanode DatanodeRegistration(127.0.0.1:41051, datanodeUuid=53feb631-d0fe-4388-a977-97c181994167, infoPort=40021, infoSecurePort=0, ipcPort=41485, storageInfo=lv=-57;cid=testClusterID;nsid=1567071903;c=1731026307427) 2024-11-08T00:38:29,735 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdaf78c167fe53f66 with lease ID 0xc56f26338c39ca22: from storage DS-7c716083-e186-48b6-9f3e-4e37090d3dc3 node DatanodeRegistration(127.0.0.1:41051, datanodeUuid=53feb631-d0fe-4388-a977-97c181994167, infoPort=40021, infoSecurePort=0, ipcPort=41485, storageInfo=lv=-57;cid=testClusterID;nsid=1567071903;c=1731026307427), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:38:29,735 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdaf78c167fe53f66 with lease ID 0xc56f26338c39ca22: Processing first storage report for DS-b4062238-9088-4768-be15-01553315c7f0 from datanode DatanodeRegistration(127.0.0.1:41051, datanodeUuid=53feb631-d0fe-4388-a977-97c181994167, infoPort=40021, infoSecurePort=0, ipcPort=41485, storageInfo=lv=-57;cid=testClusterID;nsid=1567071903;c=1731026307427) 2024-11-08T00:38:29,735 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdaf78c167fe53f66 with lease ID 0xc56f26338c39ca22: from storage DS-b4062238-9088-4768-be15-01553315c7f0 node DatanodeRegistration(127.0.0.1:41051, datanodeUuid=53feb631-d0fe-4388-a977-97c181994167, infoPort=40021, infoSecurePort=0, ipcPort=41485, storageInfo=lv=-57;cid=testClusterID;nsid=1567071903;c=1731026307427), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:38:29,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:29,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:29,820 WARN [Thread-2524 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/data/data3/current/BP-1714520190-172.17.0.3-1731026307427/current, will proceed with Du for space computation calculation, 2024-11-08T00:38:29,820 WARN [Thread-2525 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/data/data4/current/BP-1714520190-172.17.0.3-1731026307427/current, will proceed with Du for space computation calculation, 2024-11-08T00:38:29,839 WARN [Thread-2500 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T00:38:29,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fd74b248fd435b3 with lease ID 0xc56f26338c39ca23: Processing first storage report for DS-e667f6d5-bf06-4607-8766-e6dbe9b91dcd from datanode DatanodeRegistration(127.0.0.1:39599, datanodeUuid=ce2448ca-6472-4eda-bd62-030db4343341, infoPort=40877, infoSecurePort=0, ipcPort=33261, storageInfo=lv=-57;cid=testClusterID;nsid=1567071903;c=1731026307427) 2024-11-08T00:38:29,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3fd74b248fd435b3 with lease ID 0xc56f26338c39ca23: from storage DS-e667f6d5-bf06-4607-8766-e6dbe9b91dcd node DatanodeRegistration(127.0.0.1:39599, datanodeUuid=ce2448ca-6472-4eda-bd62-030db4343341, infoPort=40877, infoSecurePort=0, ipcPort=33261, storageInfo=lv=-57;cid=testClusterID;nsid=1567071903;c=1731026307427), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:38:29,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fd74b248fd435b3 with lease ID 0xc56f26338c39ca23: Processing first storage report for DS-22ea9730-2e6e-4b75-a256-c30e800f6ae6 from datanode DatanodeRegistration(127.0.0.1:39599, datanodeUuid=ce2448ca-6472-4eda-bd62-030db4343341, infoPort=40877, infoSecurePort=0, ipcPort=33261, storageInfo=lv=-57;cid=testClusterID;nsid=1567071903;c=1731026307427) 2024-11-08T00:38:29,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3fd74b248fd435b3 with lease ID 0xc56f26338c39ca23: from storage DS-22ea9730-2e6e-4b75-a256-c30e800f6ae6 node DatanodeRegistration(127.0.0.1:39599, datanodeUuid=ce2448ca-6472-4eda-bd62-030db4343341, infoPort=40877, infoSecurePort=0, ipcPort=33261, storageInfo=lv=-57;cid=testClusterID;nsid=1567071903;c=1731026307427), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T00:38:29,868 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b 2024-11-08T00:38:29,871 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/zookeeper_0, clientPort=49892, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T00:38:29,872 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49892 2024-11-08T00:38:29,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:38:29,875 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:38:29,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:38:29,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741825_1001 (size=7) 2024-11-08T00:38:29,886 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8 with version=8 2024-11-08T00:38:29,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45125/user/jenkins/test-data/a52118f7-c961-aacf-3827-5de0e737d868/hbase-staging 2024-11-08T00:38:29,888 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:38:29,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:38:29,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:38:29,889 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:38:29,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:38:29,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:38:29,889 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T00:38:29,889 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:38:29,891 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42055 2024-11-08T00:38:29,893 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42055 connecting to ZooKeeper ensemble=127.0.0.1:49892 2024-11-08T00:38:29,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420550x0, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:38:29,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42055-0x10117e14e9a0000 connected 2024-11-08T00:38:30,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:38:30,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:38:30,034 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:38:30,034 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8, hbase.cluster.distributed=false 2024-11-08T00:38:30,036 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:38:30,036 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42055 2024-11-08T00:38:30,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42055 2024-11-08T00:38:30,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42055 2024-11-08T00:38:30,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42055 2024-11-08T00:38:30,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42055 2024-11-08T00:38:30,055 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3302f0f507bd:0 server-side Connection retries=45 2024-11-08T00:38:30,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:38:30,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T00:38:30,055 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T00:38:30,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T00:38:30,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T00:38:30,055 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T00:38:30,055 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T00:38:30,056 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38327 2024-11-08T00:38:30,056 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38327 connecting to ZooKeeper ensemble=127.0.0.1:49892 2024-11-08T00:38:30,057 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:38:30,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:38:30,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:383270x0, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T00:38:30,072 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38327-0x10117e14e9a0001 connected 2024-11-08T00:38:30,072 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:38:30,072 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T00:38:30,073 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T00:38:30,073 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T00:38:30,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T00:38:30,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38327 2024-11-08T00:38:30,074 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38327 2024-11-08T00:38:30,075 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38327 2024-11-08T00:38:30,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38327 2024-11-08T00:38:30,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38327 2024-11-08T00:38:30,085 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3302f0f507bd:42055 2024-11-08T00:38:30,087 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3302f0f507bd,42055,1731026309888 2024-11-08T00:38:30,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:38:30,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:38:30,093 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3302f0f507bd,42055,1731026309888 2024-11-08T00:38:30,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T00:38:30,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,103 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T00:38:30,104 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3302f0f507bd,42055,1731026309888 from backup master directory 2024-11-08T00:38:30,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3302f0f507bd,42055,1731026309888 2024-11-08T00:38:30,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:38:30,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-08T00:38:30,113 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:38:30,114 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3302f0f507bd,42055,1731026309888 2024-11-08T00:38:30,125 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/hbase.id] with ID: e21f8e5f-876b-47ba-b424-94f4b6c8ecec 2024-11-08T00:38:30,125 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/.tmp/hbase.id 2024-11-08T00:38:30,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:38:30,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741826_1002 (size=42) 2024-11-08T00:38:30,131 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/.tmp/hbase.id]:[hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/hbase.id] 2024-11-08T00:38:30,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:38:30,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T00:38:30,144 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
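Editorial note: the FSUtils(625)/FSUtils(634) entries just above describe the cluster ID being written to a temporary path (.tmp/hbase.id) and then moved to its final location. Below is a minimal, hedged sketch of that write-temp-then-rename pattern using only the public Hadoop FileSystem API; it is not HBase's actual FSUtils implementation, and the class name is made up for illustration. The namenode URI, root directory, and cluster ID are copied from the log entries above.

// Illustrative sketch only -- not HBase's FSUtils code. Shows the
// "write to a temporary file, then rename into place" pattern that the
// FSUtils(625)/FSUtils(634) entries above describe for hbase.id.
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // the mini-cluster namenode used in this test run
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36959"), conf);
    Path rootDir = new Path("/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8");
    Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // 1) write the content to a temporary location first ...
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write("e21f8e5f-876b-47ba-b424-94f4b6c8ecec".getBytes(StandardCharsets.UTF_8));
    }
    // 2) ... then move it into place, so readers never observe a half-written file
    if (!fs.rename(tmpId, finalId)) {
      throw new java.io.IOException("rename failed: " + tmpId + " -> " + finalId);
    }
  }
}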
2024-11-08T00:38:30,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:38:30,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741827_1003 (size=196) 2024-11-08T00:38:30,161 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T00:38:30,162 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T00:38:30,162 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:38:30,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:38:30,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741828_1004 (size=1189) 2024-11-08T00:38:30,168 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store 2024-11-08T00:38:30,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:38:30,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741829_1005 (size=34) 2024-11-08T00:38:30,175 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:38:30,175 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:38:30,175 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:30,175 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:30,175 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:38:30,175 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:30,175 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T00:38:30,175 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026310175Disabling compacts and flushes for region at 1731026310175Disabling writes for close at 1731026310175Writing region close event to WAL at 1731026310175Closed at 1731026310175 2024-11-08T00:38:30,176 WARN [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/.initializing 2024-11-08T00:38:30,176 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/WALs/3302f0f507bd,42055,1731026309888 2024-11-08T00:38:30,178 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C42055%2C1731026309888, suffix=, logDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/WALs/3302f0f507bd,42055,1731026309888, archiveDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/oldWALs, maxLogs=10 2024-11-08T00:38:30,179 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C42055%2C1731026309888.1731026310178 2024-11-08T00:38:30,182 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/WALs/3302f0f507bd,42055,1731026309888/3302f0f507bd%2C42055%2C1731026309888.1731026310178 2024-11-08T00:38:30,183 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40877:40877),(127.0.0.1/127.0.0.1:40021:40021)] 2024-11-08T00:38:30,184 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:38:30,184 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:38:30,184 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,184 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T00:38:30,189 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:30,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T00:38:30,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:38:30,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T00:38:30,191 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:38:30,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T00:38:30,193 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T00:38:30,193 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,194 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,194 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,195 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,195 DEBUG [master/3302f0f507bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,195 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T00:38:30,196 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T00:38:30,198 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:38:30,198 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828642, jitterRate=0.053673356771469116}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T00:38:30,199 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731026310184Initializing all the Stores at 1731026310185 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026310185Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026310187 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026310187Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026310187Cleaning up temporary data from old regions at 1731026310195 (+8 ms)Region opened successfully at 1731026310199 (+4 ms) 2024-11-08T00:38:30,199 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T00:38:30,201 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13760890, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:38:30,202 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T00:38:30,202 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T00:38:30,202 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T00:38:30,202 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T00:38:30,202 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-08T00:38:30,203 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T00:38:30,203 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T00:38:30,204 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T00:38:30,205 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T00:38:30,216 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T00:38:30,216 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T00:38:30,217 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T00:38:30,229 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T00:38:30,230 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T00:38:30,231 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T00:38:30,240 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T00:38:30,241 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T00:38:30,250 DEBUG 
[master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T00:38:30,254 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T00:38:30,261 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T00:38:30,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:38:30,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T00:38:30,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,272 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3302f0f507bd,42055,1731026309888, sessionid=0x10117e14e9a0000, setting cluster-up flag (Was=false) 2024-11-08T00:38:30,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,324 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T00:38:30,327 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,42055,1731026309888 2024-11-08T00:38:30,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,377 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T00:38:30,380 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3302f0f507bd,42055,1731026309888 2024-11-08T00:38:30,383 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T00:38:30,385 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T00:38:30,385 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T00:38:30,386 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-08T00:38:30,386 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3302f0f507bd,42055,1731026309888 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T00:38:30,388 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:38:30,388 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:38:30,389 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:38:30,389 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3302f0f507bd:0, corePoolSize=5, maxPoolSize=5 2024-11-08T00:38:30,389 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3302f0f507bd:0, corePoolSize=10, maxPoolSize=10 2024-11-08T00:38:30,389 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,389 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:38:30,389 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3302f0f507bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-08T00:38:30,390 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731026340390 2024-11-08T00:38:30,390 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T00:38:30,390 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T00:38:30,391 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T00:38:30,391 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T00:38:30,391 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T00:38:30,391 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T00:38:30,391 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,391 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:38:30,391 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T00:38:30,392 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T00:38:30,392 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T00:38:30,392 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T00:38:30,393 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,393 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T00:38:30,393 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T00:38:30,393 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T00:38:30,393 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026310393,5,FailOnTimeoutGroup] 2024-11-08T00:38:30,394 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026310393,5,FailOnTimeoutGroup] 2024-11-08T00:38:30,394 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,394 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T00:38:30,394 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,394 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
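Note: the descriptor dump above (column families info/ns/rep_barrier/table with ROW_INDEX_V1 encoding, ROWCOL bloom filters, IN_MEMORY=true and 8 KB block size) is produced internally by FSTableDescriptors for hbase:meta. Purely as a hedged illustration, the sketch below shows how a user table with comparable column-family settings could be declared through the public client API; the table name 'demo:example' is made up, and this is not how InitMetaProcedure builds the meta descriptor internally.

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaLikeDescriptorSketch {
      public static TableDescriptor build() {
        // Column family mirroring the settings printed for 'info' above:
        // 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setKeepDeletedCells(KeepDeletedCells.FALSE)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBlockCacheEnabled(true)
            .build();
        // 'demo:example' is a hypothetical table name; hbase:meta itself is never created this way by users.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo:example"))
            .setColumnFamily(info)
            .build();
      }
    }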
2024-11-08T00:38:30,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:38:30,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741831_1007 (size=1321) 2024-11-08T00:38:30,401 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T00:38:30,401 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8 2024-11-08T00:38:30,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:38:30,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741832_1008 (size=32) 2024-11-08T00:38:30,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:38:30,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:38:30,410 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:38:30,410 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:30,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:38:30,411 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:38:30,411 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:30,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:38:30,413 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:38:30,413 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:30,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:38:30,414 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:38:30,414 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:30,414 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:30,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:38:30,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740 2024-11-08T00:38:30,416 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740 2024-11-08T00:38:30,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:38:30,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:38:30,417 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
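Note: the CompactionConfiguration lines repeated above for each store (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter) correspond to standard HBase settings. The sketch below shows, under the assumption that these are the usual hbase-site.xml keys, how the same values could be expressed programmatically; it only sets configuration values and does not reproduce HBase's internal CompactionConfiguration class.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Files below this size are always eligible for minor compaction (128 MB above).
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        // Minimum/maximum number of files selected per minor compaction (3 / 10 above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Selection ratios for normal and off-peak hours (1.2 / 5.0 above).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Major compaction every 7 days with 50% jitter (604800000 ms / 0.5 above).
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }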
2024-11-08T00:38:30,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:38:30,420 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T00:38:30,420 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826137, jitterRate=0.05048847198486328}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:38:30,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731026310408Initializing all the Stores at 1731026310408Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026310408Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026310409 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026310409Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026310409Cleaning up temporary data from old regions at 1731026310417 (+8 ms)Region opened successfully at 1731026310421 (+4 ms) 2024-11-08T00:38:30,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:38:30,421 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:38:30,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:38:30,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:38:30,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:38:30,421 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:38:30,421 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026310421Disabling compacts and flushes for region at 1731026310421Disabling writes for close at 1731026310421Writing region close 
event to WAL at 1731026310421Closed at 1731026310421 2024-11-08T00:38:30,422 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:38:30,422 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T00:38:30,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T00:38:30,424 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:38:30,424 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T00:38:30,477 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(746): ClusterId : e21f8e5f-876b-47ba-b424-94f4b6c8ecec 2024-11-08T00:38:30,477 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T00:38:30,491 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T00:38:30,491 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T00:38:30,505 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T00:38:30,506 DEBUG [RS:0;3302f0f507bd:38327 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ca406e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3302f0f507bd/172.17.0.3:0 2024-11-08T00:38:30,521 DEBUG [RS:0;3302f0f507bd:38327 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3302f0f507bd:38327 2024-11-08T00:38:30,522 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T00:38:30,522 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T00:38:30,522 DEBUG [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(832): About to register with Master. 
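Note: the assignment above is driven by the procedure framework: InitMetaProcedure (pid=1) spawns TransitRegionStateProcedure (pid=2, ppid=1), which further down spawns OpenRegionProcedure (pid=3, ppid=2). When reading such logs it can help to pull the pid/ppid/state fields out mechanically; the following small, self-contained sketch does that for one sample fragment copied from this log. The regular expression is only an assumption about the textual format of these messages, not an HBase API.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class ProcedureLineSketch {
      // Matches fragments such as "pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE"
      private static final Pattern PROC =
          Pattern.compile("pid=(\\d+)(?:, ppid=(\\d+))?, state=([A-Z_:]+)");

      public static void main(String[] args) {
        String sample = "Initialized subprocedures=[{pid=2, ppid=1, "
            + "state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; "
            + "TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]";
        Matcher m = PROC.matcher(sample);
        while (m.find()) {
          System.out.printf("pid=%s parent=%s state=%s%n",
              m.group(1), m.group(2) == null ? "-" : m.group(2), m.group(3));
        }
      }
    }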
2024-11-08T00:38:30,522 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(2659): reportForDuty to master=3302f0f507bd,42055,1731026309888 with port=38327, startcode=1731026310054 2024-11-08T00:38:30,523 DEBUG [RS:0;3302f0f507bd:38327 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T00:38:30,524 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55971, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T00:38:30,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42055 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3302f0f507bd,38327,1731026310054 2024-11-08T00:38:30,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42055 {}] master.ServerManager(517): Registering regionserver=3302f0f507bd,38327,1731026310054 2024-11-08T00:38:30,526 DEBUG [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8 2024-11-08T00:38:30,526 DEBUG [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36959 2024-11-08T00:38:30,526 DEBUG [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T00:38:30,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:38:30,535 DEBUG [RS:0;3302f0f507bd:38327 {}] zookeeper.ZKUtil(111): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3302f0f507bd,38327,1731026310054 2024-11-08T00:38:30,535 WARN [RS:0;3302f0f507bd:38327 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T00:38:30,535 INFO [RS:0;3302f0f507bd:38327 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:38:30,535 DEBUG [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/3302f0f507bd,38327,1731026310054 2024-11-08T00:38:30,535 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3302f0f507bd,38327,1731026310054] 2024-11-08T00:38:30,539 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T00:38:30,541 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T00:38:30,541 INFO [RS:0;3302f0f507bd:38327 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T00:38:30,541 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
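Note: the RegionServerTracker line above reacts to the ephemeral znode the region server just created under /hbase/rs, and the surrounding ZKWatcher lines are the matching NodeChildrenChanged notifications. As a hedged, stand-alone illustration of that mechanism using the plain ZooKeeper client API (not HBase's ZKWatcher/ZKUtil wrappers), the sketch below registers a child watch on a parent znode and creates an ephemeral child under it. The quorum address and the /hbase/rs path are taken from the log; the child name 'example-server' is made up, and actually running this would depend on the test cluster being up and its znode ACLs permitting the write.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public final class EphemeralRsZNodeSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Watcher that prints every event, similar in spirit to the ZKWatcher lines above.
        Watcher watcher = (WatchedEvent event) -> {
          System.out.println("event type=" + event.getType()
              + " state=" + event.getState() + " path=" + event.getPath());
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49892", 30_000, watcher); // quorum from the log
        connected.await();
        // Watch the parent for child changes, then add an ephemeral child;
        // the owning session going away is what removes the child again.
        List<String> before = zk.getChildren("/hbase/rs", true);
        System.out.println("children before: " + before);
        zk.create("/hbase/rs/example-server", new byte[0], // 'example-server' is a made-up child name
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        Thread.sleep(1_000); // leave a moment for the NodeChildrenChanged callback to fire
        zk.close();
      }
    }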
2024-11-08T00:38:30,542 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T00:38:30,542 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T00:38:30,542 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3302f0f507bd:0, corePoolSize=2, maxPoolSize=2 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3302f0f507bd:0, corePoolSize=1, maxPoolSize=1 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:38:30,543 DEBUG [RS:0;3302f0f507bd:38327 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3302f0f507bd:0, corePoolSize=3, maxPoolSize=3 2024-11-08T00:38:30,544 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
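Note: each "Starting executor service name=RS_... corePoolSize=N, maxPoolSize=N" line above is a fixed-size, named thread pool being brought up for one event type. The sketch below reproduces that pattern with plain JDK concurrency classes only (it is not HBase's internal executor.ExecutorService), mainly to make the corePoolSize/maxPoolSize pairing in the log concrete.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class NamedFixedPoolSketch {
      // Fixed-size pool: corePoolSize == maxPoolSize, as in the RS_OPEN_REGION etc. lines above.
      public static ThreadPoolExecutor newPool(String name, int size) {
        AtomicInteger counter = new AtomicInteger();
        return new ThreadPoolExecutor(size, size, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),
            runnable -> {
              Thread t = new Thread(runnable, name + "-" + counter.incrementAndGet());
              t.setDaemon(true);
              return t;
            });
      }

      public static void main(String[] args) throws Exception {
        ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1);
        openRegion.submit(() -> System.out.println(
            "running on " + Thread.currentThread().getName()));
        openRegion.shutdown();
        openRegion.awaitTermination(5, TimeUnit.SECONDS);
      }
    }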
2024-11-08T00:38:30,544 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,544 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,544 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,544 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,544 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,38327,1731026310054-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:38:30,560 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T00:38:30,560 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,38327,1731026310054-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,560 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,560 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.Replication(171): 3302f0f507bd,38327,1731026310054 started 2024-11-08T00:38:30,572 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:30,572 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1482): Serving as 3302f0f507bd,38327,1731026310054, RpcServer on 3302f0f507bd/172.17.0.3:38327, sessionid=0x10117e14e9a0001 2024-11-08T00:38:30,572 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T00:38:30,572 DEBUG [RS:0;3302f0f507bd:38327 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3302f0f507bd,38327,1731026310054 2024-11-08T00:38:30,572 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,38327,1731026310054' 2024-11-08T00:38:30,572 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T00:38:30,573 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T00:38:30,573 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T00:38:30,573 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T00:38:30,573 DEBUG [RS:0;3302f0f507bd:38327 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3302f0f507bd,38327,1731026310054 2024-11-08T00:38:30,573 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3302f0f507bd,38327,1731026310054' 2024-11-08T00:38:30,573 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T00:38:30,574 DEBUG 
[RS:0;3302f0f507bd:38327 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T00:38:30,574 DEBUG [RS:0;3302f0f507bd:38327 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T00:38:30,574 INFO [RS:0;3302f0f507bd:38327 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T00:38:30,574 INFO [RS:0;3302f0f507bd:38327 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T00:38:30,574 WARN [3302f0f507bd:42055 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T00:38:30,677 INFO [RS:0;3302f0f507bd:38327 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C38327%2C1731026310054, suffix=, logDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/3302f0f507bd,38327,1731026310054, archiveDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/oldWALs, maxLogs=32 2024-11-08T00:38:30,678 INFO [RS:0;3302f0f507bd:38327 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C38327%2C1731026310054.1731026310678 2024-11-08T00:38:30,689 INFO [RS:0;3302f0f507bd:38327 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/3302f0f507bd,38327,1731026310054/3302f0f507bd%2C38327%2C1731026310054.1731026310678 2024-11-08T00:38:30,691 DEBUG [RS:0;3302f0f507bd:38327 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40021:40021),(127.0.0.1/127.0.0.1:40877:40877)] 2024-11-08T00:38:30,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:30,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-08T00:38:30,825 DEBUG [3302f0f507bd:42055 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-08T00:38:30,825 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3302f0f507bd,38327,1731026310054 2024-11-08T00:38:30,827 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,38327,1731026310054, state=OPENING 2024-11-08T00:38:30,879 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T00:38:30,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:30,891 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T00:38:30,891 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:38:30,891 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:38:30,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,38327,1731026310054}] 2024-11-08T00:38:31,047 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T00:38:31,051 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40969, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T00:38:31,054 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T00:38:31,054 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:38:31,056 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3302f0f507bd%2C38327%2C1731026310054.meta, suffix=.meta, logDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/3302f0f507bd,38327,1731026310054, archiveDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/oldWALs, maxLogs=32 2024-11-08T00:38:31,057 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3302f0f507bd%2C38327%2C1731026310054.meta.1731026311056.meta 2024-11-08T00:38:31,063 INFO 
[RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/3302f0f507bd,38327,1731026310054/3302f0f507bd%2C38327%2C1731026310054.meta.1731026311056.meta 2024-11-08T00:38:31,068 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40877:40877),(127.0.0.1/127.0.0.1:40021:40021)] 2024-11-08T00:38:31,072 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T00:38:31,072 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T00:38:31,072 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T00:38:31,073 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-08T00:38:31,073 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T00:38:31,073 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T00:38:31,073 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T00:38:31,073 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T00:38:31,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T00:38:31,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T00:38:31,075 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:31,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:31,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T00:38:31,076 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T00:38:31,076 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:31,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:31,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T00:38:31,077 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T00:38:31,077 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:31,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:31,078 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T00:38:31,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T00:38:31,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T00:38:31,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T00:38:31,079 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T00:38:31,079 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740 2024-11-08T00:38:31,080 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740 2024-11-08T00:38:31,081 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T00:38:31,081 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T00:38:31,082 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
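Note: the FlushLargeStoresPolicy line above records that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the hbase:meta descriptor, so the per-family flush threshold falls back to the region's memstore flush size divided by the number of families (16.0 M here). As a hedged example only, the sketch below sets that key explicitly as a table-descriptor value on a made-up table ('demo:flushbound'); that the policy reads a descriptor-level value under exactly this key is my reading of the log line, so treat it as an assumption.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class FlushBoundSketch {
      public static TableDescriptor build() {
        // 'demo:flushbound' and the 'info' family exist only for this sketch.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo:flushbound"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Key quoted verbatim in the log: per-column-family flush lower bound, in bytes.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }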
2024-11-08T00:38:31,083 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T00:38:31,084 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863084, jitterRate=0.09746813774108887}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-08T00:38:31,084 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T00:38:31,084 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731026311073Writing region info on filesystem at 1731026311073Initializing all the Stores at 1731026311074 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026311074Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026311074Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731026311074Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731026311074Cleaning up temporary data from old regions at 1731026311081 (+7 ms)Running coprocessor post-open hooks at 1731026311084 (+3 ms)Region opened successfully at 1731026311084 2024-11-08T00:38:31,085 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731026311046 2024-11-08T00:38:31,087 DEBUG [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T00:38:31,087 INFO [RS_OPEN_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T00:38:31,088 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=3302f0f507bd,38327,1731026310054 2024-11-08T00:38:31,089 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3302f0f507bd,38327,1731026310054, state=OPEN 2024-11-08T00:38:31,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:38:31,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T00:38:31,125 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3302f0f507bd,38327,1731026310054 2024-11-08T00:38:31,125 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:38:31,125 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T00:38:31,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T00:38:31,128 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3302f0f507bd,38327,1731026310054 in 234 msec 2024-11-08T00:38:31,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T00:38:31,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 705 msec 2024-11-08T00:38:31,130 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T00:38:31,130 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T00:38:31,131 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:38:31,131 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,38327,1731026310054, seqNum=-1] 2024-11-08T00:38:31,132 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:38:31,132 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42207, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:38:31,137 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 752 msec 2024-11-08T00:38:31,137 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731026311137, completionTime=-1 2024-11-08T00:38:31,137 INFO 
[master/3302f0f507bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-08T00:38:31,137 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731026371139 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731026431139 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,42055,1731026309888-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,42055,1731026309888-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,42055,1731026309888-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3302f0f507bd:42055, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:31,139 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:31,141 DEBUG [master/3302f0f507bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T00:38:31,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.029sec 2024-11-08T00:38:31,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T00:38:31,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T00:38:31,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T00:38:31,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
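Note on the column-family attributes recorded in the region open journal above (BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true', BLOCKSIZE => '8192'): the sketch below shows how a family with those same settings could be declared through the public ColumnFamilyDescriptorBuilder API. It is an illustration only, since hbase:meta is built internally by the master, and the table name "exampleTable" is a placeholder, not a value from this run.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyDescriptorExample {
      public static TableDescriptor build() {
        // Mirrors the attributes printed in the open journal: ROWCOL bloom filter,
        // ROW_INDEX_V1 block encoding, in-memory priority, 8 KB blocks, 3 versions.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setMaxVersions(3)
            .build();
        // "exampleTable" is a placeholder; hbase:meta itself is never created by client code.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("exampleTable"))
            .setColumnFamily(info)
            .build();
      }
    }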
2024-11-08T00:38:31,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T00:38:31,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,42055,1731026309888-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T00:38:31,143 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,42055,1731026309888-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T00:38:31,145 DEBUG [master/3302f0f507bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T00:38:31,145 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T00:38:31,146 INFO [master/3302f0f507bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3302f0f507bd,42055,1731026309888-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T00:38:31,177 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5126135c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:38:31,178 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3302f0f507bd,42055,-1 for getting cluster id 2024-11-08T00:38:31,178 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T00:38:31,179 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e21f8e5f-876b-47ba-b424-94f4b6c8ecec' 2024-11-08T00:38:31,179 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T00:38:31,179 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e21f8e5f-876b-47ba-b424-94f4b6c8ecec" 2024-11-08T00:38:31,180 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6635411d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:38:31,180 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3302f0f507bd,42055,-1] 2024-11-08T00:38:31,180 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T00:38:31,180 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:31,181 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43652, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T00:38:31,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b64ec2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T00:38:31,182 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T00:38:31,183 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3302f0f507bd,38327,1731026310054, seqNum=-1] 2024-11-08T00:38:31,183 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T00:38:31,184 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58424, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T00:38:31,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3302f0f507bd,42055,1731026309888 2024-11-08T00:38:31,186 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T00:38:31,189 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-08T00:38:31,189 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T00:38:31,191 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/test.com,8080,1, archiveDir=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/oldWALs, maxLogs=32 2024-11-08T00:38:31,192 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731026311192 2024-11-08T00:38:31,197 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/test.com,8080,1/test.com%2C8080%2C1.1731026311192 2024-11-08T00:38:31,200 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40877:40877),(127.0.0.1/127.0.0.1:40021:40021)] 2024-11-08T00:38:31,206 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731026311206 2024-11-08T00:38:31,213 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,213 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,213 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,214 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,214 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,214 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/test.com,8080,1/test.com%2C8080%2C1.1731026311192 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/test.com,8080,1/test.com%2C8080%2C1.1731026311206 2024-11-08T00:38:31,215 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40021:40021),(127.0.0.1/127.0.0.1:40877:40877)] 2024-11-08T00:38:31,215 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/test.com,8080,1/test.com%2C8080%2C1.1731026311192 is not closed yet, will try archiving it next time 2024-11-08T00:38:31,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741835_1011 (size=93) 2024-11-08T00:38:31,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741835_1011 (size=93) 2024-11-08T00:38:31,220 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,220 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,220 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,220 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/WALs/test.com,8080,1/test.com%2C8080%2C1.1731026311192 to hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/oldWALs/test.com%2C8080%2C1.1731026311192 2024-11-08T00:38:31,221 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,221 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741836_1012 (size=93) 2024-11-08T00:38:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741836_1012 (size=93) 2024-11-08T00:38:31,225 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/oldWALs 2024-11-08T00:38:31,225 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731026311206) 2024-11-08T00:38:31,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T00:38:31,225 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
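The WAL messages above (WALFactory instantiating FSHLogProvider, AbstractFSWAL rolling test.com%2C8080%2C1.* and archiving the old file to oldWALs) come from the WAL layer that the test drives directly. The sketch below is a minimal illustration of that create/roll/close flow, assuming the WALFactory, getWAL and rollWriter signatures of recent HBase releases; the factory id "example-wal" and the table name are placeholders, not values from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALFactory;

    public class WalRollExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // "example-wal" is a placeholder factory id; the provider is chosen through
        // hbase.wal.provider (FSHLogProvider in the run logged above).
        WALFactory walFactory = new WALFactory(conf, "example-wal");
        RegionInfo region = RegionInfoBuilder
            .newBuilder(TableName.valueOf("exampleTable")).build();
        WAL wal = walFactory.getWAL(region);
        // Opens a new writer; the previous file moves to oldWALs once it holds no
        // un-flushed entries, matching the AbstractFSWAL archive messages above.
        wal.rollWriter();
        walFactory.close();
      }
    }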
2024-11-08T00:38:31,226 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:38:31,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:31,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:31,226 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
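The call stack above ends in AbstractTestLogRolling.tearDown, which shuts the minicluster down through HBaseTestingUtil. The sketch below shows the usual JUnit start/stop pattern behind output like this; it is a simplified stand-in for the real test class, and the test body is a placeholder check only.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Assert;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleExample {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts an in-process master, region server, ZooKeeper and mini-DFS,
        // producing the kind of startup output seen earlier in this log.
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Mirrors AbstractTestLogRolling.tearDown in the stack trace above:
        // closes the shared connection and shuts the whole minicluster down.
        testUtil.shutdownMiniCluster();
      }

      @Test
      public void clusterIsUp() throws Exception {
        // Placeholder assertion; a real test would create tables and roll WALs here.
        Assert.assertNotNull(testUtil.getAdmin().getClusterMetrics());
      }
    }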
2024-11-08T00:38:31,226 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T00:38:31,226 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=572159508, stopped=false 2024-11-08T00:38:31,226 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3302f0f507bd,42055,1731026309888 2024-11-08T00:38:31,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:38:31,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T00:38:31,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:31,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:31,248 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:38:31,248 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T00:38:31,248 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:38:31,248 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:38:31,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:31,248 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3302f0f507bd,38327,1731026310054' ***** 2024-11-08T00:38:31,249 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T00:38:31,249 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T00:38:31,249 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(959): stopping server 3302f0f507bd,38327,1731026310054 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3302f0f507bd:38327. 
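The ZKWatcher lines above report a NodeDeleted event for /hbase/running, the znode whose removal signals every region server that a cluster shutdown has been requested. For illustration, the sketch below watches the same znode with the plain Apache ZooKeeper client rather than HBase's internal ZKWatcher; the quorum address is the one printed in this log and is only meaningful inside that test run.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchExample {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log above; only valid inside that test run.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49892", 30_000, (WatchedEvent event) -> {
          // The shutdown sequence above surfaces as a NodeDeleted event on
          // /hbase/running, which every ZKWatcher instance then logs.
          System.out.println("type=" + event.getType() + ", path=" + event.getPath());
        });
        // Registers a one-shot watch whether or not the znode currently exists.
        zk.exists("/hbase/running", true);
        Thread.sleep(5_000);  // keep the process alive long enough to observe an event
        zk.close();
      }
    }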
2024-11-08T00:38:31,249 DEBUG [RS:0;3302f0f507bd:38327 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T00:38:31,249 DEBUG [RS:0;3302f0f507bd:38327 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T00:38:31,249 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-08T00:38:31,249 DEBUG [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-08T00:38:31,250 DEBUG [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-08T00:38:31,250 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T00:38:31,250 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T00:38:31,250 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T00:38:31,250 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T00:38:31,250 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T00:38:31,250 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-08T00:38:31,264 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740/.tmp/ns/f7cc0d3d24594a49a3c370ba9f706868 is 43, key is default/ns:d/1731026311133/Put/seqid=0 2024-11-08T00:38:31,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741837_1013 (size=5153) 2024-11-08T00:38:31,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741837_1013 (size=5153) 2024-11-08T00:38:31,269 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740/.tmp/ns/f7cc0d3d24594a49a3c370ba9f706868 2024-11-08T00:38:31,274 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740/.tmp/ns/f7cc0d3d24594a49a3c370ba9f706868 as hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740/ns/f7cc0d3d24594a49a3c370ba9f706868 2024-11-08T00:38:31,277 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740/ns/f7cc0d3d24594a49a3c370ba9f706868, entries=2, sequenceid=6, filesize=5.0 K 2024-11-08T00:38:31,278 INFO 
[RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false 2024-11-08T00:38:31,278 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-08T00:38:31,282 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-08T00:38:31,283 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T00:38:31,283 INFO [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T00:38:31,283 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731026311250Running coprocessor pre-close hooks at 1731026311250Disabling compacts and flushes for region at 1731026311250Disabling writes for close at 1731026311250Obtaining lock to block concurrent updates at 1731026311250Preparing flush snapshotting stores in 1588230740 at 1731026311250Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731026311250Flushing stores of hbase:meta,,1.1588230740 at 1731026311251 (+1 ms)Flushing 1588230740/ns: creating writer at 1731026311251Flushing 1588230740/ns: appending metadata at 1731026311263 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1731026311263Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fdf0c9: reopening flushed file at 1731026311273 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false at 1731026311278 (+5 ms)Writing region close event to WAL at 1731026311279 (+1 ms)Running coprocessor post-close hooks at 1731026311283 (+4 ms)Closed at 1731026311283 2024-11-08T00:38:31,283 DEBUG [RS_CLOSE_META-regionserver/3302f0f507bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T00:38:31,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,423 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,427 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T00:38:31,450 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(976): stopping server 3302f0f507bd,38327,1731026310054; all regions closed. 
2024-11-08T00:38:31,450 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,450 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,450 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,451 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,451 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741834_1010 (size=1152) 2024-11-08T00:38:31,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741834_1010 (size=1152) 2024-11-08T00:38:31,454 DEBUG [RS:0;3302f0f507bd:38327 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/oldWALs 2024-11-08T00:38:31,454 INFO [RS:0;3302f0f507bd:38327 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C38327%2C1731026310054.meta:.meta(num 1731026311056) 2024-11-08T00:38:31,455 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,455 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,455 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,455 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,455 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741833_1009 (size=93) 2024-11-08T00:38:31,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741833_1009 (size=93) 2024-11-08T00:38:31,458 DEBUG [RS:0;3302f0f507bd:38327 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/oldWALs 2024-11-08T00:38:31,458 INFO [RS:0;3302f0f507bd:38327 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3302f0f507bd%2C38327%2C1731026310054:(num 1731026310678) 2024-11-08T00:38:31,458 DEBUG [RS:0;3302f0f507bd:38327 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T00:38:31,458 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T00:38:31,458 INFO [RS:0;3302f0f507bd:38327 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:38:31,458 INFO [RS:0;3302f0f507bd:38327 {}] hbase.ChoreService(370): Chore service for: regionserver/3302f0f507bd:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T00:38:31,458 INFO [RS:0;3302f0f507bd:38327 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:38:31,458 INFO [regionserver/3302f0f507bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
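The "Chore service for: regionserver/3302f0f507bd:0 had [...] on shutdown" message above is the ChoreService cancelling its remaining ScheduledChore instances as the region server stops. The sketch below shows the general ScheduledChore/ChoreService pattern, assuming the public constructors ChoreService(String) and ScheduledChore(String, Stoppable, int); the names and the one-second period are placeholders, not values from this run.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreExample {
      public static void main(String[] args) throws Exception {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // The constructor argument becomes the worker-thread name prefix,
        // similar to the "master/<host>:0" prefixes seen in the chore messages above.
        ChoreService choreService = new ChoreService("example");
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        choreService.scheduleChore(chore);   // logged as "Chore ScheduledChore name=... is enabled."
        Thread.sleep(3_000);
        choreService.shutdown();             // cancels outstanding chores, as during the shutdown above
      }
    }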
2024-11-08T00:38:31,459 INFO [RS:0;3302f0f507bd:38327 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38327 2024-11-08T00:38:31,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3302f0f507bd,38327,1731026310054 2024-11-08T00:38:31,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T00:38:31,471 INFO [RS:0;3302f0f507bd:38327 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:38:31,482 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3302f0f507bd,38327,1731026310054] 2024-11-08T00:38:31,492 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3302f0f507bd,38327,1731026310054 already deleted, retry=false 2024-11-08T00:38:31,492 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3302f0f507bd,38327,1731026310054 expired; onlineServers=0 2024-11-08T00:38:31,492 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3302f0f507bd,42055,1731026309888' ***** 2024-11-08T00:38:31,492 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T00:38:31,492 INFO [M:0;3302f0f507bd:42055 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T00:38:31,492 INFO [M:0;3302f0f507bd:42055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T00:38:31,493 DEBUG [M:0;3302f0f507bd:42055 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T00:38:31,493 DEBUG [M:0;3302f0f507bd:42055 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T00:38:31,493 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-08T00:38:31,493 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026310393 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.large.0-1731026310393,5,FailOnTimeoutGroup] 2024-11-08T00:38:31,493 DEBUG [master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026310393 {}] cleaner.HFileCleaner(306): Exit Thread[master/3302f0f507bd:0:becomeActiveMaster-HFileCleaner.small.0-1731026310393,5,FailOnTimeoutGroup] 2024-11-08T00:38:31,493 INFO [M:0;3302f0f507bd:42055 {}] hbase.ChoreService(370): Chore service for: master/3302f0f507bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T00:38:31,493 INFO [M:0;3302f0f507bd:42055 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T00:38:31,493 DEBUG [M:0;3302f0f507bd:42055 {}] master.HMaster(1795): Stopping service threads 2024-11-08T00:38:31,493 INFO [M:0;3302f0f507bd:42055 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T00:38:31,493 INFO [M:0;3302f0f507bd:42055 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T00:38:31,493 INFO [M:0;3302f0f507bd:42055 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T00:38:31,494 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T00:38:31,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T00:38:31,503 DEBUG [M:0;3302f0f507bd:42055 {}] zookeeper.ZKUtil(347): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T00:38:31,503 WARN [M:0;3302f0f507bd:42055 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T00:38:31,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T00:38:31,504 INFO [M:0;3302f0f507bd:42055 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/.lastflushedseqids 2024-11-08T00:38:31,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741838_1014 (size=99) 2024-11-08T00:38:31,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741838_1014 (size=99) 2024-11-08T00:38:31,515 INFO [M:0;3302f0f507bd:42055 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T00:38:31,515 INFO [M:0;3302f0f507bd:42055 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T00:38:31,515 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T00:38:31,515 INFO [M:0;3302f0f507bd:42055 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:31,515 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:31,515 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T00:38:31,515 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:31,515 INFO [M:0;3302f0f507bd:42055 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-08T00:38:31,533 DEBUG [M:0;3302f0f507bd:42055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/abfb132c1e0e4abe994fa903e36687eb is 82, key is hbase:meta,,1/info:regioninfo/1731026311088/Put/seqid=0 2024-11-08T00:38:31,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741839_1015 (size=5672) 2024-11-08T00:38:31,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741839_1015 (size=5672) 2024-11-08T00:38:31,538 INFO [M:0;3302f0f507bd:42055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/abfb132c1e0e4abe994fa903e36687eb 2024-11-08T00:38:31,556 DEBUG [M:0;3302f0f507bd:42055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0047606b3fe840acbad5c6959203fb02 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731026311136/Put/seqid=0 2024-11-08T00:38:31,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741840_1016 (size=5275) 2024-11-08T00:38:31,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741840_1016 (size=5275) 2024-11-08T00:38:31,560 INFO [M:0;3302f0f507bd:42055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0047606b3fe840acbad5c6959203fb02 2024-11-08T00:38:31,579 DEBUG [M:0;3302f0f507bd:42055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aa9fc3e59ffd4dddbcb8d791f8fd7645 is 69, key is 3302f0f507bd,38327,1731026310054/rs:state/1731026310525/Put/seqid=0 2024-11-08T00:38:31,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-11-08T00:38:31,582 INFO [RS:0;3302f0f507bd:38327 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:38:31,582 INFO [RS:0;3302f0f507bd:38327 {}] regionserver.HRegionServer(1031): Exiting; stopping=3302f0f507bd,38327,1731026310054; zookeeper connection closed. 2024-11-08T00:38:31,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38327-0x10117e14e9a0001, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:38:31,582 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@24ca0563 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@24ca0563 2024-11-08T00:38:31,583 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-08T00:38:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741841_1017 (size=5156) 2024-11-08T00:38:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741841_1017 (size=5156) 2024-11-08T00:38:31,584 INFO [M:0;3302f0f507bd:42055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aa9fc3e59ffd4dddbcb8d791f8fd7645 2024-11-08T00:38:31,602 DEBUG [M:0;3302f0f507bd:42055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2287ad6506904d68b1a0e38b67c64fa2 is 52, key is load_balancer_on/state:d/1731026311188/Put/seqid=0 2024-11-08T00:38:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741842_1018 (size=5056) 2024-11-08T00:38:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741842_1018 (size=5056) 2024-11-08T00:38:31,607 INFO [M:0;3302f0f507bd:42055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2287ad6506904d68b1a0e38b67c64fa2 2024-11-08T00:38:31,611 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/abfb132c1e0e4abe994fa903e36687eb as hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/abfb132c1e0e4abe994fa903e36687eb 2024-11-08T00:38:31,615 INFO [M:0;3302f0f507bd:42055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/abfb132c1e0e4abe994fa903e36687eb, entries=8, sequenceid=29, filesize=5.5 K 2024-11-08T00:38:31,616 DEBUG [M:0;3302f0f507bd:42055 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0047606b3fe840acbad5c6959203fb02 as hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0047606b3fe840acbad5c6959203fb02 2024-11-08T00:38:31,620 INFO [M:0;3302f0f507bd:42055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0047606b3fe840acbad5c6959203fb02, entries=3, sequenceid=29, filesize=5.2 K 2024-11-08T00:38:31,621 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aa9fc3e59ffd4dddbcb8d791f8fd7645 as hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aa9fc3e59ffd4dddbcb8d791f8fd7645 2024-11-08T00:38:31,625 INFO [M:0;3302f0f507bd:42055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aa9fc3e59ffd4dddbcb8d791f8fd7645, entries=1, sequenceid=29, filesize=5.0 K 2024-11-08T00:38:31,626 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2287ad6506904d68b1a0e38b67c64fa2 as hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2287ad6506904d68b1a0e38b67c64fa2 2024-11-08T00:38:31,630 INFO [M:0;3302f0f507bd:42055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36959/user/jenkins/test-data/e117faae-8f77-bbed-8f17-433573b666d8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2287ad6506904d68b1a0e38b67c64fa2, entries=1, sequenceid=29, filesize=4.9 K 2024-11-08T00:38:31,631 INFO [M:0;3302f0f507bd:42055 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=29, compaction requested=false 2024-11-08T00:38:31,632 INFO [M:0;3302f0f507bd:42055 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T00:38:31,633 DEBUG [M:0;3302f0f507bd:42055 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731026311515Disabling compacts and flushes for region at 1731026311515Disabling writes for close at 1731026311515Obtaining lock to block concurrent updates at 1731026311515Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731026311515Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731026311516 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731026311517 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731026311517Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731026311533 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731026311533Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731026311543 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731026311556 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731026311556Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731026311565 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731026311579 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731026311579Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731026311587 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731026311602 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731026311602Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@589cfc8b: reopening flushed file at 1731026311610 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ffd807b: reopening flushed file at 1731026311615 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c0e34ee: reopening flushed file at 1731026311620 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d666e58: reopening flushed file at 1731026311625 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=29, compaction requested=false at 1731026311631 (+6 ms)Writing region close event to WAL at 1731026311632 (+1 ms)Closed at 1731026311632 2024-11-08T00:38:31,633 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,633 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,633 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,633 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,633 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T00:38:31,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41051 is added to blk_1073741830_1006 (size=10311) 2024-11-08T00:38:31,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39599 is added to blk_1073741830_1006 (size=10311) 2024-11-08T00:38:31,636 INFO [M:0;3302f0f507bd:42055 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-08T00:38:31,636 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T00:38:31,636 INFO [M:0;3302f0f507bd:42055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42055 2024-11-08T00:38:31,636 INFO [M:0;3302f0f507bd:42055 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T00:38:31,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:38:31,748 INFO [M:0;3302f0f507bd:42055 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T00:38:31,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42055-0x10117e14e9a0000, quorum=127.0.0.1:49892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T00:38:31,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e7eee62{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:38:31,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43bb2f4c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:38:31,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:38:31,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5991282a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:38:31,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59a6b271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/hadoop.log.dir/,STOPPED} 2024-11-08T00:38:31,758 WARN [BP-1714520190-172.17.0.3-1731026307427 heartbeating to localhost/127.0.0.1:36959 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:38:31,758 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:38:31,758 WARN [BP-1714520190-172.17.0.3-1731026307427 heartbeating to localhost/127.0.0.1:36959 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1714520190-172.17.0.3-1731026307427 (Datanode Uuid ce2448ca-6472-4eda-bd62-030db4343341) service to localhost/127.0.0.1:36959 2024-11-08T00:38:31,758 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:38:31,759 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/data/data3/current/BP-1714520190-172.17.0.3-1731026307427 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:38:31,759 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/data/data4/current/BP-1714520190-172.17.0.3-1731026307427 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:38:31,759 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:38:31,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44402286{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T00:38:31,761 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2753102b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:38:31,761 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:38:31,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68813b82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:38:31,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f638c2a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/hadoop.log.dir/,STOPPED} 2024-11-08T00:38:31,762 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T00:38:31,762 WARN [BP-1714520190-172.17.0.3-1731026307427 heartbeating to localhost/127.0.0.1:36959 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T00:38:31,763 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T00:38:31,763 WARN [BP-1714520190-172.17.0.3-1731026307427 heartbeating to localhost/127.0.0.1:36959 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1714520190-172.17.0.3-1731026307427 (Datanode Uuid 53feb631-d0fe-4388-a977-97c181994167) service to localhost/127.0.0.1:36959 2024-11-08T00:38:31,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/data/data1/current/BP-1714520190-172.17.0.3-1731026307427 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:38:31,763 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/cluster_4f68b44e-a80d-5fbd-bf53-4903c75c3d25/data/data2/current/BP-1714520190-172.17.0.3-1731026307427 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T00:38:31,763 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T00:38:31,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17951be7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T00:38:31,770 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e9b8f9f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T00:38:31,770 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T00:38:31,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@372f7d77{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T00:38:31,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c4c959a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35875a07-c617-9fdd-5827-e7b831ebad2b/hadoop.log.dir/,STOPPED} 2024-11-08T00:38:31,776 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T00:38:31,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,42513,1731026115632/3302f0f507bd%2C42513%2C1731026115632.meta.1731026116690.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:31,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42193/user/jenkins/test-data/21101859-ad4a-c1e3-0f17-2c410e2e15f8/WALs/3302f0f507bd,39151,1731026116929/3302f0f507bd%2C39151%2C1731026116929.1731026117246 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-08T00:38:31,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T00:38:31,801 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 233) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36959 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36959 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36959 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:36959 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36959 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36959 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36959 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36959 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=145 (was 123) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6707 (was 6714)