2024-12-03 18:53:22,757 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-03 18:53:22,767 main DEBUG Took 0.008462 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-03 18:53:22,768 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-03 18:53:22,768 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-03 18:53:22,769 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-03 18:53:22,770 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,776 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-03 18:53:22,788 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,789 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,790 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,790 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,790 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,791 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,791 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,792 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,792 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,793 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,794 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,794 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,795 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,795 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,796 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,796 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,797 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,797 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,797 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,798 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,798 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,799 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,799 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,799 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-03 18:53:22,800 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,800 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-03 18:53:22,802 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-03 18:53:22,804 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-03 18:53:22,806 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-03 18:53:22,806 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-03 18:53:22,808 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-03 18:53:22,808 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-03 18:53:22,818 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-03 18:53:22,822 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-03 18:53:22,824 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-03 18:53:22,824 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-03 18:53:22,825 main DEBUG createAppenders(={Console})
2024-12-03 18:53:22,826 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-12-03 18:53:22,826 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-03 18:53:22,826 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-12-03 18:53:22,827 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-03 18:53:22,827 main DEBUG OutputStream closed
2024-12-03 18:53:22,828 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-03 18:53:22,828 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-03 18:53:22,828 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-12-03 18:53:22,910 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-03 18:53:22,912 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-03 18:53:22,913 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-03 18:53:22,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-03 18:53:22,914 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-03 18:53:22,915 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-03 18:53:22,915 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-03 18:53:22,915 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-03 18:53:22,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-03 18:53:22,916 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-03 18:53:22,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-03 18:53:22,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-03 18:53:22,917 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-03 18:53:22,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-03 18:53:22,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-03 18:53:22,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-03 18:53:22,918 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-03 18:53:22,919 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-03 18:53:22,921 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-03 18:53:22,921 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-03 18:53:22,921 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-03 18:53:22,922 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-03T18:53:23,152 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf
2024-12-03 18:53:23,155 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-03 18:53:23,155 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
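[Note] The PropertiesConfiguration built above comes from the log4j2.properties bundled in hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar. A minimal sketch of a Log4j2 properties file producing this shape of configuration is shown below; it is an illustration only, assuming standard Log4j2 properties syntax, and it substitutes a plain Console appender for the custom HBaseTestAppender the test build actually uses:

    # Status-logger level; 'debug' is what produces the 'main DEBUG ...' lines above.
    status = debug
    appender.console.type = Console
    appender.console.name = Console
    appender.console.layout.type = PatternLayout
    # Same pattern the PatternLayout$Builder above was given.
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = Console
    # Per-package levels mirroring a few of the LoggerConfig entries above.
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
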
2024-12-03T18:53:23,163 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-03T18:53:23,198 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=258, ProcessCount=11, AvailableMemoryMB=7340
2024-12-03T18:53:23,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-03T18:53:23,217 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed, deleteOnExit=true
2024-12-03T18:53:23,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-03T18:53:23,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/test.cache.data in system properties and HBase conf
2024-12-03T18:53:23,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.tmp.dir in system properties and HBase conf
2024-12-03T18:53:23,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.log.dir in system properties and HBase conf
2024-12-03T18:53:23,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-03T18:53:23,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-03T18:53:23,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-03T18:53:23,315 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-03T18:53:23,409 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-03T18:53:23,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-03T18:53:23,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-03T18:53:23,415 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-03T18:53:23,416 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T18:53:23,416 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-03T18:53:23,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-03T18:53:23,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-03T18:53:23,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T18:53:23,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-03T18:53:23,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/nfs.dump.dir in system properties and HBase conf
2024-12-03T18:53:23,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/java.io.tmpdir in system properties and HBase conf
2024-12-03T18:53:23,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-03T18:53:23,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-03T18:53:23,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-03T18:53:23,837 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-03T18:53:24,426 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-03T18:53:24,488 INFO [Time-limited test {}] log.Log(170): Logging initialized @2364ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-03T18:53:24,552 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T18:53:24,606 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T18:53:24,625 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T18:53:24,626 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T18:53:24,627 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-03T18:53:24,639 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T18:53:24,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.log.dir/,AVAILABLE}
2024-12-03T18:53:24,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T18:53:24,817 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/java.io.tmpdir/jetty-localhost-40245-hadoop-hdfs-3_4_1-tests_jar-_-any-12229943807133281916/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T18:53:24,824 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:40245}
2024-12-03T18:53:24,825 INFO [Time-limited test {}] server.Server(415): Started @2701ms
2024-12-03T18:53:24,850 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-03T18:53:25,361 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T18:53:25,366 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T18:53:25,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T18:53:25,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T18:53:25,369 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T18:53:25,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.log.dir/,AVAILABLE}
2024-12-03T18:53:25,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T18:53:25,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/java.io.tmpdir/jetty-localhost-33437-hadoop-hdfs-3_4_1-tests_jar-_-any-2802510810783535596/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T18:53:25,466 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:33437}
2024-12-03T18:53:25,466 INFO [Time-limited test {}] server.Server(415): Started @3343ms
2024-12-03T18:53:25,510 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-03T18:53:25,600 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-03T18:53:25,605 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-03T18:53:25,606 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-03T18:53:25,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-03T18:53:25,607 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-03T18:53:25,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.log.dir/,AVAILABLE}
2024-12-03T18:53:25,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-03T18:53:25,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/java.io.tmpdir/jetty-localhost-34361-hadoop-hdfs-3_4_1-tests_jar-_-any-7900851102668309223/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T18:53:25,705 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:34361}
2024-12-03T18:53:25,705 INFO [Time-limited test {}] server.Server(415): Started @3582ms
2024-12-03T18:53:25,707 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
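[Note] The StartMiniClusterOption logged above (1 master, 1 region server, 2 HDFS datanodes, 1 ZooKeeper server) corresponds to a test bringing up the minicluster roughly as sketched below; this is an illustration only, assuming the HBaseTestingUtil/StartMiniClusterOption API referenced in the log, not the actual TestLogRolling setup code:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option object logged above: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // starts mini DFS and mini ZK, then the HBase master and region server
        try {
          // test body would run here, e.g. against util.getConnection() / util.getAdmin()
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
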
2024-12-03T18:53:26,588 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/data/data4/current/BP-397332723-172.17.0.2-1733252003910/current, will proceed with Du for space computation calculation,
2024-12-03T18:53:26,588 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/data/data2/current/BP-397332723-172.17.0.2-1733252003910/current, will proceed with Du for space computation calculation,
2024-12-03T18:53:26,588 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/data/data3/current/BP-397332723-172.17.0.2-1733252003910/current, will proceed with Du for space computation calculation,
2024-12-03T18:53:26,588 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/data/data1/current/BP-397332723-172.17.0.2-1733252003910/current, will proceed with Du for space computation calculation,
2024-12-03T18:53:26,641 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T18:53:26,642 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-03T18:53:26,691 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ccac765a7857f08 with lease ID 0xde7e957bec983269: Processing first storage report for DS-3fd4f226-4fcd-46db-acad-aaba0db1519b from datanode DatanodeRegistration(127.0.0.1:42575, datanodeUuid=2b32ee1c-a044-448c-9ab9-b2d4b4d7bd7b, infoPort=46773, infoSecurePort=0, ipcPort=37687, storageInfo=lv=-57;cid=testClusterID;nsid=219649978;c=1733252003910)
2024-12-03T18:53:26,692 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ccac765a7857f08 with lease ID 0xde7e957bec983269: from storage DS-3fd4f226-4fcd-46db-acad-aaba0db1519b node DatanodeRegistration(127.0.0.1:42575, datanodeUuid=2b32ee1c-a044-448c-9ab9-b2d4b4d7bd7b, infoPort=46773, infoSecurePort=0, ipcPort=37687, storageInfo=lv=-57;cid=testClusterID;nsid=219649978;c=1733252003910), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-03T18:53:26,692 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdccfa413a89a8d73 with lease ID 0xde7e957bec983268: Processing first storage report for DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e from datanode DatanodeRegistration(127.0.0.1:37503, datanodeUuid=2020067c-ee4b-43a7-b8b8-c20074820a82, infoPort=40269, infoSecurePort=0, ipcPort=34651, storageInfo=lv=-57;cid=testClusterID;nsid=219649978;c=1733252003910)
2024-12-03T18:53:26,692 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdccfa413a89a8d73 with lease ID 0xde7e957bec983268: from storage DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e node DatanodeRegistration(127.0.0.1:37503, datanodeUuid=2020067c-ee4b-43a7-b8b8-c20074820a82, infoPort=40269, infoSecurePort=0, ipcPort=34651, storageInfo=lv=-57;cid=testClusterID;nsid=219649978;c=1733252003910), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T18:53:26,693 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ccac765a7857f08 with lease ID 0xde7e957bec983269: Processing first storage report for DS-24bd0ecf-027c-4e1a-8e5f-a106351c7e01 from datanode DatanodeRegistration(127.0.0.1:42575, datanodeUuid=2b32ee1c-a044-448c-9ab9-b2d4b4d7bd7b, infoPort=46773, infoSecurePort=0, ipcPort=37687, storageInfo=lv=-57;cid=testClusterID;nsid=219649978;c=1733252003910)
2024-12-03T18:53:26,693 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ccac765a7857f08 with lease ID 0xde7e957bec983269: from storage DS-24bd0ecf-027c-4e1a-8e5f-a106351c7e01 node DatanodeRegistration(127.0.0.1:42575, datanodeUuid=2b32ee1c-a044-448c-9ab9-b2d4b4d7bd7b, infoPort=46773, infoSecurePort=0, ipcPort=37687, storageInfo=lv=-57;cid=testClusterID;nsid=219649978;c=1733252003910), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-03T18:53:26,693 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdccfa413a89a8d73 with lease ID 0xde7e957bec983268: Processing first storage report for DS-45604bbc-ae64-4781-844a-309303e6a18b from datanode DatanodeRegistration(127.0.0.1:37503, datanodeUuid=2020067c-ee4b-43a7-b8b8-c20074820a82, infoPort=40269, infoSecurePort=0, ipcPort=34651, storageInfo=lv=-57;cid=testClusterID;nsid=219649978;c=1733252003910)
2024-12-03T18:53:26,693 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdccfa413a89a8d73 with lease ID 0xde7e957bec983268: from storage DS-45604bbc-ae64-4781-844a-309303e6a18b node DatanodeRegistration(127.0.0.1:37503, datanodeUuid=2020067c-ee4b-43a7-b8b8-c20074820a82, infoPort=40269, infoSecurePort=0, ipcPort=34651, storageInfo=lv=-57;cid=testClusterID;nsid=219649978;c=1733252003910), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-03T18:53:26,704 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf
2024-12-03T18:53:26,765 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/zookeeper_0, clientPort=51229, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-03T18:53:26,775 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51229
2024-12-03T18:53:26,784 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T18:53:26,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T18:53:26,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741825_1001 (size=7)
2024-12-03T18:53:26,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741825_1001 (size=7)
2024-12-03T18:53:27,392 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208 with version=8
2024-12-03T18:53:27,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase-staging
2024-12-03T18:53:27,462 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-03T18:53:27,703 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db5a5ccf5be8:0 server-side Connection retries=45
2024-12-03T18:53:27,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T18:53:27,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T18:53:27,715 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T18:53:27,715 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T18:53:27,716 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T18:53:27,826 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-03T18:53:27,880 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-03T18:53:27,888 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-03T18:53:27,891 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T18:53:27,912 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 79913 (auto-detected)
2024-12-03T18:53:27,913 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-03T18:53:27,929 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37317
2024-12-03T18:53:27,947 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37317 connecting to ZooKeeper ensemble=127.0.0.1:51229
2024-12-03T18:53:28,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:373170x0, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T18:53:28,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37317-0x1019c8abf090000 connected
2024-12-03T18:53:28,168 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T18:53:28,171 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T18:53:28,182 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T18:53:28,186 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208, hbase.cluster.distributed=false
2024-12-03T18:53:28,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T18:53:28,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37317
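[Note] The RpcExecutor sizing logged above is driven by HBase RPC configuration keys; a sketch of the knobs involved is below. It is an illustration only, assuming the usual configuration keys rather than the test's actual tuning (handlerCount=3 and maxQueueLength=30 above are consistent with 3 handlers and the default of 10 queued calls per handler):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Total RPC handler threads; the executors above were instantiated with handlerCount=3.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Cap on queued calls per executor; maxQueueLength=30 above matches 10 per handler.
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
        System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }
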
2024-12-03T18:53:28,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37317
2024-12-03T18:53:28,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37317
2024-12-03T18:53:28,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37317
2024-12-03T18:53:28,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37317
2024-12-03T18:53:28,313 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db5a5ccf5be8:0 server-side Connection retries=45
2024-12-03T18:53:28,314 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T18:53:28,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-03T18:53:28,315 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-03T18:53:28,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-03T18:53:28,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-03T18:53:28,318 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-03T18:53:28,321 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-03T18:53:28,322 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37095
2024-12-03T18:53:28,324 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37095 connecting to ZooKeeper ensemble=127.0.0.1:51229
2024-12-03T18:53:28,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T18:53:28,330 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T18:53:28,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370950x0, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-03T18:53:28,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:370950x0, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T18:53:28,345 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37095-0x1019c8abf090001 connected
2024-12-03T18:53:28,349 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-03T18:53:28,358 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-03T18:53:28,361 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-03T18:53:28,367 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-03T18:53:28,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37095
2024-12-03T18:53:28,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37095
2024-12-03T18:53:28,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37095
2024-12-03T18:53:28,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37095
2024-12-03T18:53:28,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37095
2024-12-03T18:53:28,384 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db5a5ccf5be8:37317
2024-12-03T18:53:28,385 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db5a5ccf5be8,37317,1733252007561
2024-12-03T18:53:28,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T18:53:28,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T18:53:28,399 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db5a5ccf5be8,37317,1733252007561
2024-12-03T18:53:28,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-03T18:53:28,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T18:53:28,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T18:53:28,429 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-03T18:53:28,430 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db5a5ccf5be8,37317,1733252007561 from backup master directory
2024-12-03T18:53:28,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db5a5ccf5be8,37317,1733252007561
2024-12-03T18:53:28,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T18:53:28,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-03T18:53:28,439 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-03T18:53:28,439 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db5a5ccf5be8,37317,1733252007561
2024-12-03T18:53:28,441 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-03T18:53:28,442 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-03T18:53:28,492 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase.id] with ID: 4268db8e-e7a9-4c21-ab90-821a9ca2cb79
2024-12-03T18:53:28,493 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/.tmp/hbase.id
2024-12-03T18:53:28,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741826_1002 (size=42)
2024-12-03T18:53:28,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741826_1002 (size=42)
2024-12-03T18:53:28,505 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/.tmp/hbase.id]:[hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase.id]
2024-12-03T18:53:28,546 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-03T18:53:28,551 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-03T18:53:28,567 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 14ms.
2024-12-03T18:53:28,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T18:53:28,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T18:53:28,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741827_1003 (size=196)
2024-12-03T18:53:28,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741827_1003 (size=196)
2024-12-03T18:53:28,605 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-03T18:53:28,608 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-03T18:53:28,614 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-03T18:53:28,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741828_1004 (size=1189)
2024-12-03T18:53:28,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741828_1004 (size=1189)
2024-12-03T18:53:28,658 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store
2024-12-03T18:53:28,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741829_1005 (size=34)
2024-12-03T18:53:28,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741829_1005 (size=34)
2024-12-03T18:53:28,683 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-03T18:53:28,685 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T18:53:28,687 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T18:53:28,687 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T18:53:28,687 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T18:53:28,688 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-03T18:53:28,689 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T18:53:28,689 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T18:53:28,690 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252008686Disabling compacts and flushes for region at 1733252008686Disabling writes for close at 1733252008689 (+3 ms)Writing region close event to WAL at 1733252008689Closed at 1733252008689 2024-12-03T18:53:28,693 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/.initializing 2024-12-03T18:53:28,693 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/WALs/db5a5ccf5be8,37317,1733252007561 2024-12-03T18:53:28,713 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C37317%2C1733252007561, suffix=, logDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/WALs/db5a5ccf5be8,37317,1733252007561, archiveDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/oldWALs, maxLogs=10 2024-12-03T18:53:28,721 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37317%2C1733252007561.1733252008717 2024-12-03T18:53:28,740 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/WALs/db5a5ccf5be8,37317,1733252007561/db5a5ccf5be8%2C37317%2C1733252007561.1733252008717 2024-12-03T18:53:28,750 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46773:46773),(127.0.0.1/127.0.0.1:40269:40269)] 2024-12-03T18:53:28,751 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:53:28,752 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:53:28,755 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,756 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T18:53:28,814 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:28,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:53:28,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T18:53:28,820 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:28,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:53:28,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T18:53:28,825 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:28,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:53:28,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,828 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T18:53:28,828 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:28,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:53:28,830 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,834 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,835 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,841 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,842 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,845 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T18:53:28,850 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:53:28,854 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:53:28,856 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757588, jitterRate=-0.03667771816253662}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T18:53:28,862 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733252008767Initializing all the Stores at 1733252008769 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252008769Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252008770 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252008770Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252008770Cleaning up temporary data from old regions at 1733252008842 (+72 ms)Region opened successfully at 1733252008862 (+20 ms) 2024-12-03T18:53:28,864 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T18:53:28,895 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41d55699, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:53:28,921 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T18:53:28,930 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T18:53:28,930 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T18:53:28,933 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T18:53:28,935 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-03T18:53:28,939 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-03T18:53:28,939 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T18:53:28,962 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T18:53:28,969 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T18:53:29,036 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T18:53:29,040 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T18:53:29,042 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T18:53:29,049 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T18:53:29,051 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T18:53:29,056 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T18:53:29,068 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T18:53:29,070 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T18:53:29,080 DEBUG 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T18:53:29,103 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T18:53:29,112 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T18:53:29,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:53:29,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:53:29,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:53:29,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:53:29,127 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db5a5ccf5be8,37317,1733252007561, sessionid=0x1019c8abf090000, setting cluster-up flag (Was=false) 2024-12-03T18:53:29,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:53:29,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:53:29,186 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T18:53:29,191 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,37317,1733252007561 2024-12-03T18:53:29,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:53:29,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:53:29,249 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T18:53:29,254 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,37317,1733252007561 2024-12-03T18:53:29,263 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T18:53:29,274 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(746): ClusterId : 4268db8e-e7a9-4c21-ab90-821a9ca2cb79 2024-12-03T18:53:29,277 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T18:53:29,293 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T18:53:29,293 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T18:53:29,303 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T18:53:29,304 DEBUG [RS:0;db5a5ccf5be8:37095 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4216028d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:53:29,321 DEBUG [RS:0;db5a5ccf5be8:37095 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db5a5ccf5be8:37095 2024-12-03T18:53:29,323 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T18:53:29,323 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T18:53:29,323 DEBUG [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-03T18:53:29,326 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(2659): reportForDuty to master=db5a5ccf5be8,37317,1733252007561 with port=37095, startcode=1733252008279 2024-12-03T18:53:29,332 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T18:53:29,338 DEBUG [RS:0;db5a5ccf5be8:37095 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T18:53:29,343 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T18:53:29,351 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
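Aside: the ZKWatcher lines above (NodeCreated and NodeChildrenChanged under baseZNode=/hbase) are ordinary ZooKeeper watch notifications delivered to the master and region server sessions. A minimal sketch with the plain ZooKeeper client, assuming the quorum address from this test run and a hypothetical class name, just to show the shape of those events:

import org.apache.zookeeper.ZooKeeper;

public class ZkEventSketch {
    public static void main(String[] args) throws Exception {
        // Quorum address taken from the log above; the 30s session timeout is an arbitrary choice.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51229", 30000, event ->
            System.out.println("type=" + event.getType()
                + ", state=" + event.getState()
                + ", path=" + event.getPath()));
        // Register a children watch on the base znode, analogous to baseZNode=/hbase above.
        zk.getChildren("/hbase", true);
        Thread.sleep(60_000);   // keep the session alive long enough to observe events
        zk.close();
    }
}

Note that plain ZooKeeper watches are one-shot; HBase's ZKWatcher re-registers its watches after each notification, which is why the same NodeChildrenChanged events keep appearing for both sessions throughout the log.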
2024-12-03T18:53:29,356 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db5a5ccf5be8,37317,1733252007561 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T18:53:29,363 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:53:29,363 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:53:29,363 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:53:29,364 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:53:29,364 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db5a5ccf5be8:0, corePoolSize=10, maxPoolSize=10 2024-12-03T18:53:29,364 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,364 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:53:29,364 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,366 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733252039366 2024-12-03T18:53:29,368 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T18:53:29,369 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T18:53:29,370 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:53:29,370 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T18:53:29,372 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T18:53:29,373 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T18:53:29,373 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T18:53:29,373 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T18:53:29,377 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:29,376 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,378 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T18:53:29,381 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T18:53:29,384 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T18:53:29,385 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T18:53:29,388 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T18:53:29,389 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T18:53:29,392 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252009390,5,FailOnTimeoutGroup] 2024-12-03T18:53:29,393 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252009393,5,FailOnTimeoutGroup] 2024-12-03T18:53:29,393 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,393 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T18:53:29,395 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:53:29,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:53:29,395 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,397 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T18:53:29,397 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208 2024-12-03T18:53:29,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:53:29,408 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:53:29,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:53:29,411 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49813, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T18:53:29,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:53:29,417 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37317 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:29,417 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:53:29,417 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:29,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:53:29,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:53:29,419 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37317 {}] master.ServerManager(517): Registering regionserver=db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:29,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:53:29,422 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:29,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:53:29,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:53:29,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:53:29,427 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:29,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:53:29,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:53:29,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:53:29,430 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:29,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:53:29,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:53:29,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740 2024-12-03T18:53:29,433 DEBUG [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208 2024-12-03T18:53:29,433 DEBUG [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42673 2024-12-03T18:53:29,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740 2024-12-03T18:53:29,433 DEBUG [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T18:53:29,436 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:53:29,436 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:53:29,437 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T18:53:29,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:53:29,443 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:53:29,444 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775706, jitterRate=-0.013639748096466064}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:53:29,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733252009410Initializing all the Stores at 1733252009411 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252009411Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252009412 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252009412Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252009412Cleaning up temporary data from old regions at 1733252009436 (+24 ms)Region opened successfully at 1733252009446 (+10 ms) 2024-12-03T18:53:29,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:53:29,446 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:53:29,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:53:29,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:53:29,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:53:29,448 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:53:29,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252009446Disabling compacts and flushes for region at 1733252009446Disabling writes for close at 1733252009447 (+1 ms)Writing region close event to WAL at 1733252009448 (+1 ms)Closed at 1733252009448 2024-12-03T18:53:29,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:53:29,449 DEBUG [RS:0;db5a5ccf5be8:37095 {}] zookeeper.ZKUtil(111): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:29,449 WARN [RS:0;db5a5ccf5be8:37095 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T18:53:29,450 INFO [RS:0;db5a5ccf5be8:37095 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:53:29,450 DEBUG [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:29,452 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:53:29,452 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T18:53:29,452 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db5a5ccf5be8,37095,1733252008279] 2024-12-03T18:53:29,458 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T18:53:29,465 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:53:29,467 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T18:53:29,473 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T18:53:29,483 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T18:53:29,487 INFO [RS:0;db5a5ccf5be8:37095 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:53:29,487 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,488 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T18:53:29,493 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T18:53:29,495 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-03T18:53:29,495 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,495 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,495 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,495 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,495 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,496 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:53:29,496 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,496 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,496 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,496 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,496 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,496 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:53:29,496 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:53:29,497 DEBUG [RS:0;db5a5ccf5be8:37095 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:53:29,497 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,497 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,498 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,498 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
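Aside: each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line above and below is a periodic background task registered with HBase's ChoreService. Purely as an analogy (this is not HBase's implementation), the same scheduling shape in plain JDK terms, with hypothetical task bodies, looks like:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreAnalogy {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Analogous to: name=CompactionChecker, period=1000, unit=MILLISECONDS
        pool.scheduleAtFixedRate(() -> System.out.println("compaction check"),
            0, 1000, TimeUnit.MILLISECONDS);
        // Analogous to: name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS
        pool.scheduleAtFixedRate(() -> System.out.println("memstore flush check"),
            0, 1000, TimeUnit.MILLISECONDS);
    }
}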
2024-12-03T18:53:29,498 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,498 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37095,1733252008279-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:53:29,513 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T18:53:29,515 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37095,1733252008279-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,516 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,516 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.Replication(171): db5a5ccf5be8,37095,1733252008279 started 2024-12-03T18:53:29,531 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:29,531 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1482): Serving as db5a5ccf5be8,37095,1733252008279, RpcServer on db5a5ccf5be8/172.17.0.2:37095, sessionid=0x1019c8abf090001 2024-12-03T18:53:29,532 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T18:53:29,532 DEBUG [RS:0;db5a5ccf5be8:37095 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:29,532 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,37095,1733252008279' 2024-12-03T18:53:29,532 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T18:53:29,533 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T18:53:29,534 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T18:53:29,534 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T18:53:29,535 DEBUG [RS:0;db5a5ccf5be8:37095 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:29,535 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,37095,1733252008279' 2024-12-03T18:53:29,535 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T18:53:29,536 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T18:53:29,536 DEBUG [RS:0;db5a5ccf5be8:37095 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T18:53:29,536 INFO [RS:0;db5a5ccf5be8:37095 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T18:53:29,536 INFO [RS:0;db5a5ccf5be8:37095 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-03T18:53:29,619 WARN [db5a5ccf5be8:37317 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T18:53:29,643 INFO [RS:0;db5a5ccf5be8:37095 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C37095%2C1733252008279, suffix=, logDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279, archiveDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs, maxLogs=32 2024-12-03T18:53:29,646 INFO [RS:0;db5a5ccf5be8:37095 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252009646 2024-12-03T18:53:29,655 INFO [RS:0;db5a5ccf5be8:37095 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252009646 2024-12-03T18:53:29,656 DEBUG [RS:0;db5a5ccf5be8:37095 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46773:46773),(127.0.0.1/127.0.0.1:40269:40269)] 2024-12-03T18:53:29,874 DEBUG [db5a5ccf5be8:37317 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T18:53:29,890 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:29,895 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,37095,1733252008279, state=OPENING 2024-12-03T18:53:29,952 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T18:53:29,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:53:29,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:53:29,966 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:53:29,966 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:53:29,968 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:53:29,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,37095,1733252008279}] 2024-12-03T18:53:30,146 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T18:53:30,149 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50469, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T18:53:30,160 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T18:53:30,161 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:53:30,165 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C37095%2C1733252008279.meta, suffix=.meta, logDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279, archiveDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs, maxLogs=32 2024-12-03T18:53:30,166 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.meta.1733252010166.meta 2024-12-03T18:53:30,174 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.meta.1733252010166.meta 2024-12-03T18:53:30,175 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:46773:46773)] 2024-12-03T18:53:30,176 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:53:30,177 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T18:53:30,179 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T18:53:30,184 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T18:53:30,187 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T18:53:30,188 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:53:30,188 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T18:53:30,188 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T18:53:30,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:53:30,193 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:53:30,193 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:30,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:53:30,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:53:30,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:53:30,196 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:30,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:53:30,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:53:30,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:53:30,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:30,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:53:30,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:53:30,201 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:53:30,202 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:30,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T18:53:30,203 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:53:30,204 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740 2024-12-03T18:53:30,206 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740 2024-12-03T18:53:30,209 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:53:30,209 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:53:30,210 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T18:53:30,212 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:53:30,214 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872985, jitterRate=0.11005841195583344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:53:30,215 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T18:53:30,216 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733252010189Writing region info on filesystem at 1733252010189Initializing all the Stores at 1733252010191 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252010191Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252010191Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252010191Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252010191Cleaning up temporary data from old regions at 1733252010209 (+18 ms)Running coprocessor post-open hooks at 1733252010215 (+6 ms)Region opened successfully at 1733252010216 (+1 ms) 2024-12-03T18:53:30,223 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733252010138 2024-12-03T18:53:30,233 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T18:53:30,234 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T18:53:30,235 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:30,237 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,37095,1733252008279, state=OPEN 2024-12-03T18:53:30,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:53:30,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:53:30,272 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:53:30,272 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:53:30,273 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:30,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T18:53:30,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,37095,1733252008279 in 303 msec 2024-12-03T18:53:30,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T18:53:30,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 828 msec 2024-12-03T18:53:30,291 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:53:30,291 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T18:53:30,310 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:53:30,311 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,37095,1733252008279, seqNum=-1] 2024-12-03T18:53:30,330 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:53:30,332 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34575, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:53:30,352 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0600 sec 2024-12-03T18:53:30,352 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733252010352, completionTime=-1 2024-12-03T18:53:30,355 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T18:53:30,356 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T18:53:30,378 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T18:53:30,378 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733252070378 2024-12-03T18:53:30,379 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733252130379 2024-12-03T18:53:30,379 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 22 msec 2024-12-03T18:53:30,382 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37317,1733252007561-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:30,383 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37317,1733252007561-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:30,383 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37317,1733252007561-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:30,385 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db5a5ccf5be8:37317, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:53:30,385 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:30,386 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T18:53:30,391 DEBUG [master/db5a5ccf5be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T18:53:30,411 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.972sec 2024-12-03T18:53:30,412 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T18:53:30,413 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T18:53:30,414 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T18:53:30,414 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T18:53:30,415 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T18:53:30,415 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37317,1733252007561-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:53:30,416 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37317,1733252007561-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T18:53:30,423 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T18:53:30,425 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T18:53:30,425 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37317,1733252007561-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:53:30,484 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:53:30,487 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-03T18:53:30,487 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-03T18:53:30,491 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db5a5ccf5be8,37317,-1 for getting cluster id 2024-12-03T18:53:30,494 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T18:53:30,502 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4268db8e-e7a9-4c21-ab90-821a9ca2cb79' 2024-12-03T18:53:30,505 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T18:53:30,505 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4268db8e-e7a9-4c21-ab90-821a9ca2cb79" 2024-12-03T18:53:30,507 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33ae194b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:53:30,507 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db5a5ccf5be8,37317,-1] 2024-12-03T18:53:30,510 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T18:53:30,511 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:53:30,513 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T18:53:30,515 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:53:30,516 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:53:30,521 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,37095,1733252008279, seqNum=-1] 2024-12-03T18:53:30,522 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:53:30,524 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60432, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:53:30,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=db5a5ccf5be8,37317,1733252007561 2024-12-03T18:53:30,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:53:30,548 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T18:53:30,552 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T18:53:30,556 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is db5a5ccf5be8,37317,1733252007561 2024-12-03T18:53:30,558 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7257518 2024-12-03T18:53:30,559 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T18:53:30,562 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57702, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T18:53:30,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37317 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T18:53:30,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-03T18:53:30,568 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37317 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:53:30,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37317 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-03T18:53:30,581 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T18:53:30,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37317 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-03T18:53:30,583 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:30,586 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T18:53:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:53:30,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741835_1011 (size=389) 2024-12-03T18:53:30,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741835_1011 (size=389) 2024-12-03T18:53:30,620 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0e6b1d836297524bb6b7585c1f9577c5, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208 2024-12-03T18:53:30,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741836_1012 (size=72) 2024-12-03T18:53:30,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741836_1012 (size=72) 2024-12-03T18:53:30,658 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:53:30,658 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0e6b1d836297524bb6b7585c1f9577c5, disabling compactions & flushes 2024-12-03T18:53:30,658 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:53:30,658 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:53:30,659 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. after waiting 0 ms 2024-12-03T18:53:30,659 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:53:30,659 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:53:30,659 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0e6b1d836297524bb6b7585c1f9577c5: Waiting for close lock at 1733252010658Disabling compacts and flushes for region at 1733252010658Disabling writes for close at 1733252010659 (+1 ms)Writing region close event to WAL at 1733252010659Closed at 1733252010659 2024-12-03T18:53:30,661 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T18:53:30,669 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733252010662"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733252010662"}]},"ts":"1733252010662"} 2024-12-03T18:53:30,674 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T18:53:30,676 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T18:53:30,678 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252010676"}]},"ts":"1733252010676"} 2024-12-03T18:53:30,683 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-03T18:53:30,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0e6b1d836297524bb6b7585c1f9577c5, ASSIGN}] 2024-12-03T18:53:30,688 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0e6b1d836297524bb6b7585c1f9577c5, ASSIGN 2024-12-03T18:53:30,690 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0e6b1d836297524bb6b7585c1f9577c5, ASSIGN; state=OFFLINE, location=db5a5ccf5be8,37095,1733252008279; forceNewPlan=false, retain=false 2024-12-03T18:53:30,842 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0e6b1d836297524bb6b7585c1f9577c5, regionState=OPENING, regionLocation=db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:30,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0e6b1d836297524bb6b7585c1f9577c5, ASSIGN because future has completed 2024-12-03T18:53:30,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e6b1d836297524bb6b7585c1f9577c5, server=db5a5ccf5be8,37095,1733252008279}] 2024-12-03T18:53:31,018 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 
2024-12-03T18:53:31,019 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0e6b1d836297524bb6b7585c1f9577c5, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:53:31,019 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,019 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:53:31,019 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,019 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,022 INFO [StoreOpener-0e6b1d836297524bb6b7585c1f9577c5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,024 INFO [StoreOpener-0e6b1d836297524bb6b7585c1f9577c5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e6b1d836297524bb6b7585c1f9577c5 columnFamilyName info 2024-12-03T18:53:31,024 DEBUG [StoreOpener-0e6b1d836297524bb6b7585c1f9577c5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:53:31,025 INFO [StoreOpener-0e6b1d836297524bb6b7585c1f9577c5-1 {}] regionserver.HStore(327): Store=0e6b1d836297524bb6b7585c1f9577c5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:53:31,026 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,027 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,028 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,028 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,028 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,031 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,034 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:53:31,035 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0e6b1d836297524bb6b7585c1f9577c5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695886, jitterRate=-0.11513617634773254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T18:53:31,035 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:31,036 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0e6b1d836297524bb6b7585c1f9577c5: Running coprocessor pre-open hook at 1733252011020Writing region info on filesystem at 1733252011020Initializing all the Stores at 1733252011021 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252011021Cleaning up temporary data from old regions at 1733252011028 (+7 ms)Running coprocessor post-open hooks at 1733252011035 (+7 ms)Region opened successfully at 1733252011036 (+1 ms) 2024-12-03T18:53:31,038 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5., pid=6, masterSystemTime=1733252011007 2024-12-03T18:53:31,041 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:53:31,041 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:53:31,042 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0e6b1d836297524bb6b7585c1f9577c5, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,37095,1733252008279 2024-12-03T18:53:31,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e6b1d836297524bb6b7585c1f9577c5, server=db5a5ccf5be8,37095,1733252008279 because future has completed 2024-12-03T18:53:31,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T18:53:31,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0e6b1d836297524bb6b7585c1f9577c5, server=db5a5ccf5be8,37095,1733252008279 in 199 msec 2024-12-03T18:53:31,056 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T18:53:31,056 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0e6b1d836297524bb6b7585c1f9577c5, ASSIGN in 368 msec 2024-12-03T18:53:31,057 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T18:53:31,058 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252011058"}]},"ts":"1733252011058"} 2024-12-03T18:53:31,061 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-03T18:53:31,063 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T18:53:31,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 492 msec 2024-12-03T18:53:35,696 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-03T18:53:35,739 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T18:53:35,741 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-03T18:53:37,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T18:53:37,878 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T18:53:37,883 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-03T18:53:37,883 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-03T18:53:37,886 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:53:37,886 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T18:53:37,887 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T18:53:37,888 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T18:53:40,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37317 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:53:40,673 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-03T18:53:40,679 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-03T18:53:40,686 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-03T18:53:40,686 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 
2024-12-03T18:53:40,687 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252020687 2024-12-03T18:53:40,695 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:53:40,695 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:53:40,696 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:53:40,696 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:53:40,696 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:53:40,697 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252009646 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252020687 2024-12-03T18:53:40,698 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:46773:46773)] 2024-12-03T18:53:40,698 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252009646 is not closed yet, will try archiving it next time 2024-12-03T18:53:40,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741833_1009 (size=451) 2024-12-03T18:53:40,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741833_1009 (size=451) 2024-12-03T18:53:40,703 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252009646 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs/db5a5ccf5be8%2C37095%2C1733252008279.1733252009646 2024-12-03T18:53:40,709 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5., hostname=db5a5ccf5be8,37095,1733252008279, seqNum=2] 2024-12-03T18:53:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37095 {}] regionserver.HRegion(8855): Flush requested on 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:53:52,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0e6b1d836297524bb6b7585c1f9577c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:53:52,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/adfa34d7c56f46a9ade09ff23adbdd2c is 1080, key is row0001/info:/1733252020711/Put/seqid=0 2024-12-03T18:53:52,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741838_1014 (size=12509) 2024-12-03T18:53:52,815 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741838_1014 (size=12509) 2024-12-03T18:53:52,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/adfa34d7c56f46a9ade09ff23adbdd2c 2024-12-03T18:53:52,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/adfa34d7c56f46a9ade09ff23adbdd2c as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/adfa34d7c56f46a9ade09ff23adbdd2c 2024-12-03T18:53:52,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/adfa34d7c56f46a9ade09ff23adbdd2c, entries=7, sequenceid=11, filesize=12.2 K 2024-12-03T18:53:52,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0e6b1d836297524bb6b7585c1f9577c5 in 123ms, sequenceid=11, compaction requested=false 2024-12-03T18:53:52,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0e6b1d836297524bb6b7585c1f9577c5: 2024-12-03T18:53:56,702 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
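The flush sequence above (HRegion(2902) "Flushing", DefaultStoreFlusher(81) writing the .tmp HFile, HRegionFileSystem(442) committing it, then "Finished flush ... in 123ms") is the server-side trace of a memstore flush. In this test the flush is triggered on the server, but the same records can be produced by an explicit client request; a short sketch, assuming an already-open Connection as in the previous sketch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class FlushSketch {
      /** Asks the region servers hosting the table to flush its memstores to HFiles,
       *  which produces records like the HRegion(2902)/DefaultStoreFlusher(81) lines above. */
      static void flushTable(Connection conn, String table) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf(table));
        }
      }
    }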
2024-12-03T18:54:00,774 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252040773 2024-12-03T18:54:00,992 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 214 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:00,993 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:00,993 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:00,993 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:00,993 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:00,994 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:00,994 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252020687 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252040773 2024-12-03T18:54:00,995 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:46773:46773)] 2024-12-03T18:54:00,995 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252020687 is not closed yet, will try archiving it next time 2024-12-03T18:54:00,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741837_1013 (size=12399) 2024-12-03T18:54:00,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741837_1013 (size=12399) 2024-12-03T18:54:01,200 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:03,408 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:05,614 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:07,822 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:07,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37095 {}] regionserver.HRegion(8855): Flush requested on 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:54:07,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0e6b1d836297524bb6b7585c1f9577c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:54:08,028 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:08,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/26572ef0a49f4b9cbcf526902bc63c48 is 1080, key is row0008/info:/1733252034753/Put/seqid=0 2024-12-03T18:54:08,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741840_1016 (size=12509) 2024-12-03T18:54:08,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741840_1016 (size=12509) 2024-12-03T18:54:08,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/26572ef0a49f4b9cbcf526902bc63c48 2024-12-03T18:54:08,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/26572ef0a49f4b9cbcf526902bc63c48 as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/26572ef0a49f4b9cbcf526902bc63c48 2024-12-03T18:54:08,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/26572ef0a49f4b9cbcf526902bc63c48, entries=7, sequenceid=21, filesize=12.2 K 2024-12-03T18:54:08,272 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:08,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0e6b1d836297524bb6b7585c1f9577c5 in 
449ms, sequenceid=21, compaction requested=false 2024-12-03T18:54:08,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0e6b1d836297524bb6b7585c1f9577c5: 2024-12-03T18:54:08,274 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-03T18:54:08,274 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:54:08,277 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/adfa34d7c56f46a9ade09ff23adbdd2c because midkey is the same as first or last row 2024-12-03T18:54:10,032 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:10,517 INFO [master/db5a5ccf5be8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T18:54:10,517 INFO [master/db5a5ccf5be8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-03T18:54:12,239 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:12,242 WARN [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:12,244 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C37095%2C1733252008279:(num 1733252040773) roll requested 2024-12-03T18:54:12,245 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252052244 2024-12-03T18:54:12,459 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:12,460 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:12,460 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:12,460 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:12,460 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:12,460 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
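The WARN at AbstractFSWAL(2201) above requests a WAL roll because eight syncs crossed the slow-sync threshold (count=8, threshold=5), and later WARNs at AbstractFSWAL(1374) request rolls because a single sync took longer than 5000 ms. A simplified, self-contained sketch of that two-part decision follows; the two thresholds are copied from the log, the 100 ms "slow" cutoff is an assumption, and none of this is the real AbstractFSWAL code.

    /** Illustration only: mirrors the two roll conditions visible in the log,
     *  not org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL itself. */
    final class SlowSyncRollSketch {
      private final long slowSyncMs = 100;         // a sync slower than this counts as "slow" (assumed value)
      private final int slowSyncRollThreshold = 5; // "threshold=5" in the count-based WARN above
      private final long rollOnSyncMs = 5000;      // "threshold=5000 ms" in the time-based WARNs
      private int slowSyncCount;

      /** Returns true when a log roll should be requested after a sync with the given cost. */
      boolean recordSync(long syncCostMs) {
        if (syncCostMs >= rollOnSyncMs) {
          return true;                             // one very slow sync is enough ("time=5034 ms" case)
        }
        if (syncCostMs >= slowSyncMs && ++slowSyncCount > slowSyncRollThreshold) {
          slowSyncCount = 0;
          return true;                             // too many moderately slow syncs ("count=8, threshold=5" case)
        }
        return false;
      }
    }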
2024-12-03T18:54:12,461 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252040773 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252052244 2024-12-03T18:54:12,462 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46773:46773),(127.0.0.1/127.0.0.1:40269:40269)] 2024-12-03T18:54:12,462 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252040773 is not closed yet, will try archiving it next time 2024-12-03T18:54:12,462 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252020687 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs/db5a5ccf5be8%2C37095%2C1733252008279.1733252020687 2024-12-03T18:54:12,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741839_1015 (size=7739) 2024-12-03T18:54:12,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741839_1015 (size=7739) 2024-12-03T18:54:14,447 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:16,020 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0e6b1d836297524bb6b7585c1f9577c5, had cached 0 bytes from a total of 25018 2024-12-03T18:54:16,655 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:18,862 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:21,071 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], 
DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:23,076 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T18:54:23,077 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252063076 2024-12-03T18:54:26,703 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T18:54:28,115 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5034 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:28,117 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5034 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:28,117 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C37095%2C1733252008279:(num 1733252063076) roll requested 2024-12-03T18:54:28,117 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:28,118 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:28,118 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:28,118 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:28,118 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:28,118 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252052244 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252063076 2024-12-03T18:54:28,120 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46773:46773),(127.0.0.1/127.0.0.1:40269:40269)] 2024-12-03T18:54:28,120 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252052244 is not closed yet, will try archiving it next time 2024-12-03T18:54:28,121 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 2024-12-03T18:54:28,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741841_1017 (size=4753) 2024-12-03T18:54:28,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741841_1017 (size=4753) 2024-12-03T18:54:33,125 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:33,125 WARN [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:33,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37095 {}] regionserver.HRegion(8855): Flush requested on 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:54:33,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0e6b1d836297524bb6b7585c1f9577c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:54:33,192 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5063 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:33,192 WARN [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5063 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:35,127 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T18:54:38,181 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5053 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:38,181 WARN [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5053 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK], DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK]] 2024-12-03T18:54:38,181 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:38,181 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:38,182 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:38,182 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:38,182 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:38,183 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252063076 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 2024-12-03T18:54:38,184 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:46773:46773)] 2024-12-03T18:54:38,184 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252063076 is not closed yet, will try archiving it next time 2024-12-03T18:54:38,184 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C37095%2C1733252008279:(num 1733252068120) roll requested 2024-12-03T18:54:38,185 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252078185 2024-12-03T18:54:38,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741842_1018 (size=1569) 2024-12-03T18:54:38,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741842_1018 (size=1569) 2024-12-03T18:54:38,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/7ee28134d7ba41719831eda5c4a370c0 is 1080, key is row0015/info:/1733252049828/Put/seqid=0 2024-12-03T18:54:38,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741844_1020 (size=12509) 2024-12-03T18:54:38,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741844_1020 (size=12509) 2024-12-03T18:54:38,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/7ee28134d7ba41719831eda5c4a370c0 2024-12-03T18:54:38,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/7ee28134d7ba41719831eda5c4a370c0 as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/7ee28134d7ba41719831eda5c4a370c0 2024-12-03T18:54:38,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/7ee28134d7ba41719831eda5c4a370c0, entries=7, sequenceid=31, filesize=12.2 K 2024-12-03T18:54:43,194 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:43,194 WARN [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:43,220 INFO [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:43,220 WARN [FSHLog-0-hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208-prefix:db5a5ccf5be8,37095,1733252008279 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37503,DS-8a563e24-9cd4-4f83-a5d0-093d49ec545e,DISK], DatanodeInfoWithStorage[127.0.0.1:42575,DS-3fd4f226-4fcd-46db-acad-aaba0db1519b,DISK]] 2024-12-03T18:54:43,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0e6b1d836297524bb6b7585c1f9577c5 in 10095ms, sequenceid=31, compaction requested=true 2024-12-03T18:54:43,220 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0e6b1d836297524bb6b7585c1f9577c5: 2024-12-03T18:54:43,220 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,220 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-03T18:54:43,220 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:54:43,220 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,221 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/adfa34d7c56f46a9ade09ff23adbdd2c because midkey is the same as first or last row 2024-12-03T18:54:43,221 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,221 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,221 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252078185 2024-12-03T18:54:43,222 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:46773:46773)] 2024-12-03T18:54:43,222 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 is not closed yet, will try archiving it next time 2024-12-03T18:54:43,222 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C37095%2C1733252008279:(num 1733252078185) roll requested 2024-12-03T18:54:43,222 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252040773 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs/db5a5ccf5be8%2C37095%2C1733252008279.1733252040773 2024-12-03T18:54:43,223 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252083222 2024-12-03T18:54:43,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0e6b1d836297524bb6b7585c1f9577c5:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:54:43,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741843_1019 (size=438) 2024-12-03T18:54:43,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741843_1019 (size=438) 2024-12-03T18:54:43,225 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252052244 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs/db5a5ccf5be8%2C37095%2C1733252008279.1733252052244 2024-12-03T18:54:43,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:54:43,227 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:54:43,227 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252063076 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs/db5a5ccf5be8%2C37095%2C1733252008279.1733252063076 2024-12-03T18:54:43,230 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:54:43,232 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.HStore(1541): 0e6b1d836297524bb6b7585c1f9577c5/info is initiating minor compaction (all files) 2024-12-03T18:54:43,232 INFO [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0e6b1d836297524bb6b7585c1f9577c5/info in 
TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:54:43,233 INFO [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/adfa34d7c56f46a9ade09ff23adbdd2c, hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/26572ef0a49f4b9cbcf526902bc63c48, hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/7ee28134d7ba41719831eda5c4a370c0] into tmpdir=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp, totalSize=36.6 K 2024-12-03T18:54:43,235 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] compactions.Compactor(225): Compacting adfa34d7c56f46a9ade09ff23adbdd2c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733252020711 2024-12-03T18:54:43,236 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26572ef0a49f4b9cbcf526902bc63c48, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733252034753 2024-12-03T18:54:43,236 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7ee28134d7ba41719831eda5c4a370c0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733252049828 2024-12-03T18:54:43,237 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,237 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,237 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,237 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,238 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,238 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252078185 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252083222 2024-12-03T18:54:43,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741845_1021 (size=93) 2024-12-03T18:54:43,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741845_1021 (size=93) 2024-12-03T18:54:43,242 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 is not closed yet, will try archiving it next time 2024-12-03T18:54:43,242 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252078185 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs/db5a5ccf5be8%2C37095%2C1733252008279.1733252078185 2024-12-03T18:54:43,243 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:46773:46773)] 2024-12-03T18:54:43,243 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 is not closed yet, will try archiving it next time 2024-12-03T18:54:43,244 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37095%2C1733252008279.1733252083244 2024-12-03T18:54:43,254 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,254 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,254 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,254 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,254 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:54:43,254 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252083222 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252083244 2024-12-03T18:54:43,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741846_1022 (size=1258) 2024-12-03T18:54:43,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741846_1022 (size=1258) 2024-12-03T18:54:43,258 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 is not closed yet, will try archiving it next time 2024-12-03T18:54:43,264 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:46773:46773)] 2024-12-03T18:54:43,264 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 is not closed yet, will try archiving it next time 2024-12-03T18:54:43,276 INFO [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0e6b1d836297524bb6b7585c1f9577c5#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:54:43,278 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/9f4509604dcf4ea48fb5c8cabb76a15e is 1080, key is row0001/info:/1733252020711/Put/seqid=0 2024-12-03T18:54:43,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741848_1024 (size=27710) 2024-12-03T18:54:43,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741848_1024 (size=27710) 2024-12-03T18:54:43,297 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/9f4509604dcf4ea48fb5c8cabb76a15e as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/9f4509604dcf4ea48fb5c8cabb76a15e 2024-12-03T18:54:43,314 INFO [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0e6b1d836297524bb6b7585c1f9577c5/info of 0e6b1d836297524bb6b7585c1f9577c5 into 9f4509604dcf4ea48fb5c8cabb76a15e(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:54:43,314 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0e6b1d836297524bb6b7585c1f9577c5: 2024-12-03T18:54:43,315 INFO [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5., storeName=0e6b1d836297524bb6b7585c1f9577c5/info, priority=13, startTime=1733252083222; duration=0sec 2024-12-03T18:54:43,315 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/9f4509604dcf4ea48fb5c8cabb76a15e because midkey is the same as first or last row 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/9f4509604dcf4ea48fb5c8cabb76a15e because midkey is the same as first or last row 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/9f4509604dcf4ea48fb5c8cabb76a15e because midkey is the same as first or last row 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:54:43,316 DEBUG [RS:0;db5a5ccf5be8:37095-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0e6b1d836297524bb6b7585c1f9577c5:info 2024-12-03T18:54:43,625 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/WALs/db5a5ccf5be8,37095,1733252008279/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs/db5a5ccf5be8%2C37095%2C1733252008279.1733252068120 2024-12-03T18:54:55,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37095 {}] regionserver.HRegion(8855): Flush requested on 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:54:55,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0e6b1d836297524bb6b7585c1f9577c5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:54:55,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/6d700c2dca3c4e258502b3cdfae51bff is 1080, key is row0022/info:/1733252083245/Put/seqid=0 2024-12-03T18:54:55,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741849_1025 (size=12509) 2024-12-03T18:54:55,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741849_1025 (size=12509) 2024-12-03T18:54:55,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/6d700c2dca3c4e258502b3cdfae51bff 2024-12-03T18:54:55,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/6d700c2dca3c4e258502b3cdfae51bff as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/6d700c2dca3c4e258502b3cdfae51bff 2024-12-03T18:54:55,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/6d700c2dca3c4e258502b3cdfae51bff, entries=7, sequenceid=42, filesize=12.2 K 2024-12-03T18:54:55,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0e6b1d836297524bb6b7585c1f9577c5 in 34ms, sequenceid=42, compaction requested=false 2024-12-03T18:54:55,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0e6b1d836297524bb6b7585c1f9577c5: 2024-12-03T18:54:55,316 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-03T18:54:55,316 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:54:55,316 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/9f4509604dcf4ea48fb5c8cabb76a15e because midkey is the same as first or last row 2024-12-03T18:54:56,703 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T18:55:01,021 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0e6b1d836297524bb6b7585c1f9577c5, had cached 0 bytes from a total of 40219 2024-12-03T18:55:03,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T18:55:03,310 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
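Several records above show the split policy deciding "Should split because region size is big enough" (e.g. sumSize=39.3 K vs sizeToCheck=16.0 K) while StoreUtils(137) then refuses to split because the largest store file's midkey equals its first or last row, so no usable split point exists. A simplified sketch of that guard, for illustration only and not the actual StoreUtils or ConstantSizeRegionSplitPolicy code:

    import java.util.Arrays;

    /** Illustration only: the size check and midkey guard seen in the split-policy records above. */
    final class SplitCheckSketch {
      /** e.g. sumSize=39.3 K vs sizeToCheck=16.0 K in the log. */
      static boolean bigEnoughToSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        return sumStoreSizeBytes > sizeToCheckBytes;
      }

      /** Returns the split point, or null when the midkey equals the first or last row
       *  (the "cannot split ... because midkey is the same as first or last row" case). */
      static byte[] splitPoint(byte[] firstRow, byte[] midKey, byte[] lastRow) {
        if (Arrays.equals(midKey, firstRow) || Arrays.equals(midKey, lastRow)) {
          return null;
        }
        return midKey;
      }
    }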
2024-12-03T18:55:03,311 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:03,320 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:03,321 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:03,322 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
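The call stack above shows the shutdown being driven from AbstractTestLogRolling.tearDown via HBaseTestingUtil.shutdownMiniCluster. A minimal JUnit 4 shape for that kind of teardown, assuming a shared HBaseTestingUtil field (the class name and field name here are placeholders; the shutdownMiniCluster call itself appears in the stack trace):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class LogRollingTearDownSketch {
      // Mini-cluster helper; the class and its shutdownMiniCluster method appear in the stack trace above.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        TEST_UTIL.startMiniCluster();    // starts a local HDFS + ZooKeeper + HBase mini cluster
      }

      @After
      public void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster(); // produces the "Shutting down minicluster" records above
      }
    }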
2024-12-03T18:55:03,322 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T18:55:03,322 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=245640440, stopped=false 2024-12-03T18:55:03,323 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db5a5ccf5be8,37317,1733252007561 2024-12-03T18:55:03,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:03,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:03,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:03,379 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:55:03,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:03,380 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T18:55:03,381 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:03,381 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:03,381 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:03,382 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:03,383 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db5a5ccf5be8,37095,1733252008279' ***** 2024-12-03T18:55:03,383 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T18:55:03,383 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T18:55:03,384 INFO [RS:0;db5a5ccf5be8:37095 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T18:55:03,384 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T18:55:03,384 INFO [RS:0;db5a5ccf5be8:37095 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T18:55:03,384 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(3091): Received CLOSE for 0e6b1d836297524bb6b7585c1f9577c5 2024-12-03T18:55:03,385 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(959): stopping server db5a5ccf5be8,37095,1733252008279 2024-12-03T18:55:03,385 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:55:03,385 INFO [RS:0;db5a5ccf5be8:37095 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db5a5ccf5be8:37095. 
2024-12-03T18:55:03,386 DEBUG [RS:0;db5a5ccf5be8:37095 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:03,386 DEBUG [RS:0;db5a5ccf5be8:37095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:03,386 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T18:55:03,386 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0e6b1d836297524bb6b7585c1f9577c5, disabling compactions & flushes 2024-12-03T18:55:03,386 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T18:55:03,386 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T18:55:03,386 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:55:03,386 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:55:03,386 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T18:55:03,386 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. after waiting 0 ms 2024-12-03T18:55:03,386 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 
2024-12-03T18:55:03,387 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 0e6b1d836297524bb6b7585c1f9577c5 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-03T18:55:03,387 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T18:55:03,387 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:55:03,387 DEBUG [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 0e6b1d836297524bb6b7585c1f9577c5=TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.} 2024-12-03T18:55:03,387 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:55:03,387 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:55:03,387 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:55:03,388 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:55:03,388 DEBUG [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1351): Waiting on 0e6b1d836297524bb6b7585c1f9577c5, 1588230740 2024-12-03T18:55:03,388 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-03T18:55:03,395 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/4a15b5896efc491ea6f3e41451b6fc6a is 1080, key is row0029/info:/1733252097286/Put/seqid=0 2024-12-03T18:55:03,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741850_1026 (size=8193) 2024-12-03T18:55:03,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741850_1026 (size=8193) 2024-12-03T18:55:03,402 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/4a15b5896efc491ea6f3e41451b6fc6a 2024-12-03T18:55:03,411 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/.tmp/info/4a15b5896efc491ea6f3e41451b6fc6a as 
hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/4a15b5896efc491ea6f3e41451b6fc6a 2024-12-03T18:55:03,414 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/info/d890439b6b3e4a4097de482a286dfca8 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5./info:regioninfo/1733252011042/Put/seqid=0 2024-12-03T18:55:03,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741851_1027 (size=7016) 2024-12-03T18:55:03,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741851_1027 (size=7016) 2024-12-03T18:55:03,420 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/4a15b5896efc491ea6f3e41451b6fc6a, entries=3, sequenceid=48, filesize=8.0 K 2024-12-03T18:55:03,420 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/info/d890439b6b3e4a4097de482a286dfca8 2024-12-03T18:55:03,421 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 0e6b1d836297524bb6b7585c1f9577c5 in 35ms, sequenceid=48, compaction requested=true 2024-12-03T18:55:03,422 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/adfa34d7c56f46a9ade09ff23adbdd2c, hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/26572ef0a49f4b9cbcf526902bc63c48, hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/7ee28134d7ba41719831eda5c4a370c0] to archive 2024-12-03T18:55:03,425 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T18:55:03,428 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/adfa34d7c56f46a9ade09ff23adbdd2c to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/adfa34d7c56f46a9ade09ff23adbdd2c 2024-12-03T18:55:03,430 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/26572ef0a49f4b9cbcf526902bc63c48 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/26572ef0a49f4b9cbcf526902bc63c48 2024-12-03T18:55:03,432 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/7ee28134d7ba41719831eda5c4a370c0 to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/info/7ee28134d7ba41719831eda5c4a370c0 2024-12-03T18:55:03,443 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/ns/ebdd064897bc4004ae8cdacbe574d5d2 is 43, key is default/ns:d/1733252010336/Put/seqid=0 2024-12-03T18:55:03,442 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db5a5ccf5be8:37317 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-03T18:55:03,447 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [adfa34d7c56f46a9ade09ff23adbdd2c=12509, 26572ef0a49f4b9cbcf526902bc63c48=12509, 7ee28134d7ba41719831eda5c4a370c0=12509] 2024-12-03T18:55:03,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741852_1028 (size=5153) 2024-12-03T18:55:03,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741852_1028 (size=5153) 2024-12-03T18:55:03,450 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/ns/ebdd064897bc4004ae8cdacbe574d5d2 2024-12-03T18:55:03,452 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/default/TestLogRolling-testSlowSyncLogRolling/0e6b1d836297524bb6b7585c1f9577c5/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-03T18:55:03,454 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 2024-12-03T18:55:03,455 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0e6b1d836297524bb6b7585c1f9577c5: Waiting for close lock at 1733252103385Running coprocessor pre-close hooks at 1733252103386 (+1 ms)Disabling compacts and flushes for region at 1733252103386Disabling writes for close at 1733252103386Obtaining lock to block concurrent updates at 1733252103387 (+1 ms)Preparing flush snapshotting stores in 0e6b1d836297524bb6b7585c1f9577c5 at 1733252103387Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733252103387Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. at 1733252103389 (+2 ms)Flushing 0e6b1d836297524bb6b7585c1f9577c5/info: creating writer at 1733252103389Flushing 0e6b1d836297524bb6b7585c1f9577c5/info: appending metadata at 1733252103394 (+5 ms)Flushing 0e6b1d836297524bb6b7585c1f9577c5/info: closing flushed file at 1733252103394Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@660a2fc3: reopening flushed file at 1733252103410 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 0e6b1d836297524bb6b7585c1f9577c5 in 35ms, sequenceid=48, compaction requested=true at 1733252103421 (+11 ms)Writing region close event to WAL at 1733252103448 (+27 ms)Running coprocessor post-close hooks at 1733252103453 (+5 ms)Closed at 1733252103454 (+1 ms) 2024-12-03T18:55:03,455 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733252010564.0e6b1d836297524bb6b7585c1f9577c5. 
2024-12-03T18:55:03,471 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/table/fe79d7149c634dc38e097c3192fbca1e is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733252011058/Put/seqid=0 2024-12-03T18:55:03,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741853_1029 (size=5396) 2024-12-03T18:55:03,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741853_1029 (size=5396) 2024-12-03T18:55:03,478 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/table/fe79d7149c634dc38e097c3192fbca1e 2024-12-03T18:55:03,485 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/info/d890439b6b3e4a4097de482a286dfca8 as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/info/d890439b6b3e4a4097de482a286dfca8 2024-12-03T18:55:03,494 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/info/d890439b6b3e4a4097de482a286dfca8, entries=10, sequenceid=11, filesize=6.9 K 2024-12-03T18:55:03,495 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/ns/ebdd064897bc4004ae8cdacbe574d5d2 as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/ns/ebdd064897bc4004ae8cdacbe574d5d2 2024-12-03T18:55:03,499 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T18:55:03,499 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T18:55:03,503 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/ns/ebdd064897bc4004ae8cdacbe574d5d2, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T18:55:03,504 INFO [regionserver/db5a5ccf5be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:55:03,505 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/.tmp/table/fe79d7149c634dc38e097c3192fbca1e as 
hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/table/fe79d7149c634dc38e097c3192fbca1e 2024-12-03T18:55:03,512 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/table/fe79d7149c634dc38e097c3192fbca1e, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T18:55:03,514 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false 2024-12-03T18:55:03,520 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T18:55:03,521 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:55:03,521 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:55:03,521 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252103387Running coprocessor pre-close hooks at 1733252103387Disabling compacts and flushes for region at 1733252103387Disabling writes for close at 1733252103388 (+1 ms)Obtaining lock to block concurrent updates at 1733252103388Preparing flush snapshotting stores in 1588230740 at 1733252103388Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733252103389 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733252103390 (+1 ms)Flushing 1588230740/info: creating writer at 1733252103390Flushing 1588230740/info: appending metadata at 1733252103413 (+23 ms)Flushing 1588230740/info: closing flushed file at 1733252103414 (+1 ms)Flushing 1588230740/ns: creating writer at 1733252103428 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733252103443 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733252103443Flushing 1588230740/table: creating writer at 1733252103457 (+14 ms)Flushing 1588230740/table: appending metadata at 1733252103470 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733252103470Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e0eb3c0: reopening flushed file at 1733252103484 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@779d1d20: reopening flushed file at 1733252103494 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f36e05a: reopening flushed file at 1733252103503 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 126ms, sequenceid=11, compaction requested=false at 1733252103514 (+11 ms)Writing region close event to WAL at 1733252103515 (+1 ms)Running coprocessor post-close hooks at 1733252103521 (+6 ms)Closed at 1733252103521 2024-12-03T18:55:03,521 DEBUG 
[RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T18:55:03,588 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(976): stopping server db5a5ccf5be8,37095,1733252008279; all regions closed. 2024-12-03T18:55:03,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,590 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,591 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,591 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,591 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741834_1010 (size=3066) 2024-12-03T18:55:03,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741834_1010 (size=3066) 2024-12-03T18:55:03,600 DEBUG [RS:0;db5a5ccf5be8:37095 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs 2024-12-03T18:55:03,600 INFO [RS:0;db5a5ccf5be8:37095 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C37095%2C1733252008279.meta:.meta(num 1733252010166) 2024-12-03T18:55:03,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,601 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,601 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:03,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741847_1023 (size=12695) 2024-12-03T18:55:03,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741847_1023 (size=12695) 2024-12-03T18:55:03,609 DEBUG [RS:0;db5a5ccf5be8:37095 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/oldWALs 2024-12-03T18:55:03,609 INFO [RS:0;db5a5ccf5be8:37095 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C37095%2C1733252008279:(num 1733252083244) 2024-12-03T18:55:03,609 DEBUG [RS:0;db5a5ccf5be8:37095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:03,609 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:55:03,609 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:55:03,609 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.ChoreService(370): Chore service for: regionserver/db5a5ccf5be8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T18:55:03,609 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:55:03,610 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T18:55:03,610 INFO [RS:0;db5a5ccf5be8:37095 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37095 2024-12-03T18:55:03,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:55:03,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db5a5ccf5be8,37095,1733252008279 2024-12-03T18:55:03,631 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:55:03,633 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db5a5ccf5be8,37095,1733252008279] 2024-12-03T18:55:03,652 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db5a5ccf5be8,37095,1733252008279 already deleted, retry=false 2024-12-03T18:55:03,653 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db5a5ccf5be8,37095,1733252008279 expired; onlineServers=0 2024-12-03T18:55:03,653 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db5a5ccf5be8,37317,1733252007561' ***** 2024-12-03T18:55:03,653 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T18:55:03,653 INFO [M:0;db5a5ccf5be8:37317 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:55:03,654 INFO [M:0;db5a5ccf5be8:37317 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:55:03,654 DEBUG [M:0;db5a5ccf5be8:37317 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T18:55:03,654 DEBUG [M:0;db5a5ccf5be8:37317 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T18:55:03,654 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T18:55:03,654 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252009393 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252009393,5,FailOnTimeoutGroup] 2024-12-03T18:55:03,654 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252009390 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252009390,5,FailOnTimeoutGroup] 2024-12-03T18:55:03,655 INFO [M:0;db5a5ccf5be8:37317 {}] hbase.ChoreService(370): Chore service for: master/db5a5ccf5be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T18:55:03,655 INFO [M:0;db5a5ccf5be8:37317 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:55:03,655 DEBUG [M:0;db5a5ccf5be8:37317 {}] master.HMaster(1795): Stopping service threads 2024-12-03T18:55:03,655 INFO [M:0;db5a5ccf5be8:37317 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T18:55:03,655 INFO [M:0;db5a5ccf5be8:37317 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:55:03,656 INFO [M:0;db5a5ccf5be8:37317 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T18:55:03,657 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T18:55:03,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T18:55:03,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:03,664 DEBUG [M:0;db5a5ccf5be8:37317 {}] zookeeper.ZKUtil(347): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T18:55:03,664 WARN [M:0;db5a5ccf5be8:37317 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T18:55:03,665 INFO [M:0;db5a5ccf5be8:37317 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/.lastflushedseqids 2024-12-03T18:55:03,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741854_1030 (size=130) 2024-12-03T18:55:03,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741854_1030 (size=130) 2024-12-03T18:55:03,680 INFO [M:0;db5a5ccf5be8:37317 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T18:55:03,680 INFO [M:0;db5a5ccf5be8:37317 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T18:55:03,680 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:55:03,680 INFO [M:0;db5a5ccf5be8:37317 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:03,680 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:03,681 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:55:03,681 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:03,681 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-12-03T18:55:03,704 DEBUG [M:0;db5a5ccf5be8:37317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f479d42815a541c7a2234a5f2c93ef6f is 82, key is hbase:meta,,1/info:regioninfo/1733252010235/Put/seqid=0 2024-12-03T18:55:03,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741855_1031 (size=5672) 2024-12-03T18:55:03,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741855_1031 (size=5672) 2024-12-03T18:55:03,710 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f479d42815a541c7a2234a5f2c93ef6f 2024-12-03T18:55:03,730 DEBUG [M:0;db5a5ccf5be8:37317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77112378d1ba49d68f2d2ca253efcbfc is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733252011065/Put/seqid=0 2024-12-03T18:55:03,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741856_1032 (size=6246) 2024-12-03T18:55:03,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741856_1032 (size=6246) 2024-12-03T18:55:03,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:03,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37095-0x1019c8abf090001, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:03,743 INFO [RS:0;db5a5ccf5be8:37095 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:55:03,743 INFO [RS:0;db5a5ccf5be8:37095 {}] regionserver.HRegionServer(1031): Exiting; stopping=db5a5ccf5be8,37095,1733252008279; zookeeper connection closed. 
2024-12-03T18:55:03,743 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4f23fd07 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4f23fd07 2024-12-03T18:55:03,744 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T18:55:04,139 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77112378d1ba49d68f2d2ca253efcbfc 2024-12-03T18:55:04,154 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 77112378d1ba49d68f2d2ca253efcbfc 2024-12-03T18:55:04,172 DEBUG [M:0;db5a5ccf5be8:37317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/307b6bfa6f444b36be3fea62e4aed752 is 69, key is db5a5ccf5be8,37095,1733252008279/rs:state/1733252009421/Put/seqid=0 2024-12-03T18:55:04,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741857_1033 (size=5156) 2024-12-03T18:55:04,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741857_1033 (size=5156) 2024-12-03T18:55:04,179 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/307b6bfa6f444b36be3fea62e4aed752 2024-12-03T18:55:04,199 DEBUG [M:0;db5a5ccf5be8:37317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/325509aa45db4aecb25e7b12dbfe143a is 52, key is load_balancer_on/state:d/1733252010545/Put/seqid=0 2024-12-03T18:55:04,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741858_1034 (size=5056) 2024-12-03T18:55:04,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741858_1034 (size=5056) 2024-12-03T18:55:04,206 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/325509aa45db4aecb25e7b12dbfe143a 2024-12-03T18:55:04,213 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f479d42815a541c7a2234a5f2c93ef6f as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f479d42815a541c7a2234a5f2c93ef6f 
2024-12-03T18:55:04,220 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f479d42815a541c7a2234a5f2c93ef6f, entries=8, sequenceid=59, filesize=5.5 K 2024-12-03T18:55:04,221 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/77112378d1ba49d68f2d2ca253efcbfc as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/77112378d1ba49d68f2d2ca253efcbfc 2024-12-03T18:55:04,228 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 77112378d1ba49d68f2d2ca253efcbfc 2024-12-03T18:55:04,228 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/77112378d1ba49d68f2d2ca253efcbfc, entries=6, sequenceid=59, filesize=6.1 K 2024-12-03T18:55:04,229 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/307b6bfa6f444b36be3fea62e4aed752 as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/307b6bfa6f444b36be3fea62e4aed752 2024-12-03T18:55:04,236 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/307b6bfa6f444b36be3fea62e4aed752, entries=1, sequenceid=59, filesize=5.0 K 2024-12-03T18:55:04,237 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/325509aa45db4aecb25e7b12dbfe143a as hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/325509aa45db4aecb25e7b12dbfe143a 2024-12-03T18:55:04,244 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/325509aa45db4aecb25e7b12dbfe143a, entries=1, sequenceid=59, filesize=4.9 K 2024-12-03T18:55:04,245 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 564ms, sequenceid=59, compaction requested=false 2024-12-03T18:55:04,247 INFO [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T18:55:04,247 DEBUG [M:0;db5a5ccf5be8:37317 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252103680Disabling compacts and flushes for region at 1733252103680Disabling writes for close at 1733252103681 (+1 ms)Obtaining lock to block concurrent updates at 1733252103681Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733252103681Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1733252103682 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733252103683 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733252103683Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733252103703 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733252103703Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733252103716 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733252103729 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733252103730 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733252104154 (+424 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733252104171 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733252104172 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733252104186 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733252104199 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733252104199Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11741eec: reopening flushed file at 1733252104212 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38b44ae6: reopening flushed file at 1733252104220 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13dbb6e6: reopening flushed file at 1733252104228 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@583303c6: reopening flushed file at 1733252104236 (+8 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 564ms, sequenceid=59, compaction requested=false at 1733252104245 (+9 ms)Writing region close event to WAL at 1733252104247 (+2 ms)Closed at 1733252104247 2024-12-03T18:55:04,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:04,248 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:04,248 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:04,248 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:04,248 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:04,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37503 is added to blk_1073741830_1006 (size=27961) 2024-12-03T18:55:04,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42575 is added to blk_1073741830_1006 (size=27961) 2024-12-03T18:55:04,252 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T18:55:04,252 INFO [M:0;db5a5ccf5be8:37317 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T18:55:04,252 INFO [M:0;db5a5ccf5be8:37317 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37317 2024-12-03T18:55:04,252 INFO [M:0;db5a5ccf5be8:37317 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:55:04,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:04,406 INFO [M:0;db5a5ccf5be8:37317 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:55:04,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37317-0x1019c8abf090000, quorum=127.0.0.1:51229, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:04,482 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:04,485 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:04,486 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:04,486 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:04,486 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:04,490 WARN [BP-397332723-172.17.0.2-1733252003910 heartbeating to localhost/127.0.0.1:42673 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:04,490 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:55:04,490 WARN [BP-397332723-172.17.0.2-1733252003910 heartbeating to localhost/127.0.0.1:42673 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-397332723-172.17.0.2-1733252003910 (Datanode Uuid 2b32ee1c-a044-448c-9ab9-b2d4b4d7bd7b) service to localhost/127.0.0.1:42673 2024-12-03T18:55:04,490 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:04,492 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/data/data3/current/BP-397332723-172.17.0.2-1733252003910 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:04,492 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/data/data4/current/BP-397332723-172.17.0.2-1733252003910 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:04,492 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:04,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:04,495 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:04,495 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:04,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:04,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:04,497 WARN [BP-397332723-172.17.0.2-1733252003910 heartbeating to localhost/127.0.0.1:42673 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:04,497 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:55:04,497 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:04,497 WARN [BP-397332723-172.17.0.2-1733252003910 heartbeating to localhost/127.0.0.1:42673 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-397332723-172.17.0.2-1733252003910 (Datanode Uuid 2020067c-ee4b-43a7-b8b8-c20074820a82) service to localhost/127.0.0.1:42673 2024-12-03T18:55:04,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/data/data1/current/BP-397332723-172.17.0.2-1733252003910 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:04,498 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/cluster_fd2f5795-a177-0b83-b69e-17d075f298ed/data/data2/current/BP-397332723-172.17.0.2-1733252003910 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:04,498 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:04,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:55:04,507 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:04,507 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:04,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:04,507 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:04,515 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T18:55:04,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T18:55:04,550 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42673 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42673 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1ab9ef81 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) 
app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:42673 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/db5a5ccf5be8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: 
master/db5a5ccf5be8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42673 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/db5a5ccf5be8:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42673 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42673 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:42673 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42673 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=137 (was 258), ProcessCount=11 (was 11), AvailableMemoryMB=6416 (was 7340) 2024-12-03T18:55:04,556 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=137, ProcessCount=11, AvailableMemoryMB=6416 2024-12-03T18:55:04,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.log.dir so I do NOT create it in target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf2fda39-4b5a-bc74-655b-4e38b1b1efcf/hadoop.tmp.dir so I do NOT create it in target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b, deleteOnExit=true 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/test.cache.data in system properties and HBase conf 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.log.dir in system properties and HBase conf 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T18:55:04,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T18:55:04,557 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/nfs.dump.dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/java.io.tmpdir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:55:04,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T18:55:04,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T18:55:04,571 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:55:05,023 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:05,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:05,032 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:05,032 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:05,032 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:55:05,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:05,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:05,034 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:05,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d95bc23{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/java.io.tmpdir/jetty-localhost-46757-hadoop-hdfs-3_4_1-tests_jar-_-any-13727178406911933203/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:55:05,125 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:46757} 2024-12-03T18:55:05,126 INFO [Time-limited test {}] server.Server(415): Started @103002ms 2024-12-03T18:55:05,137 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:55:05,374 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:05,377 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:05,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:05,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:05,378 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:55:05,379 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:05,379 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:05,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d69c419{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/java.io.tmpdir/jetty-localhost-43511-hadoop-hdfs-3_4_1-tests_jar-_-any-1542622403841437838/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:05,469 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:43511} 2024-12-03T18:55:05,469 INFO [Time-limited test {}] server.Server(415): Started @103346ms 2024-12-03T18:55:05,471 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:05,502 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:05,506 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:05,506 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:05,506 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:05,506 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:55:05,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:05,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:05,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75434f63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/java.io.tmpdir/jetty-localhost-45765-hadoop-hdfs-3_4_1-tests_jar-_-any-12063283300346330296/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:05,600 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:45765} 2024-12-03T18:55:05,600 INFO [Time-limited test {}] server.Server(415): Started @103477ms 2024-12-03T18:55:05,601 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:06,603 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/data/data1/current/BP-1452214034-172.17.0.2-1733252104586/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:06,604 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/data/data2/current/BP-1452214034-172.17.0.2-1733252104586/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:06,627 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:55:06,629 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x732608ed8ade1130 with lease ID 0xb44223e61aac57a4: Processing first storage report for DS-3d459ca1-4781-43fc-9ba3-04438b09d062 from datanode DatanodeRegistration(127.0.0.1:38395, datanodeUuid=21509893-4fc1-4cac-80d7-7665318de056, infoPort=45843, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=1795435365;c=1733252104586) 2024-12-03T18:55:06,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x732608ed8ade1130 with lease ID 0xb44223e61aac57a4: from storage DS-3d459ca1-4781-43fc-9ba3-04438b09d062 node DatanodeRegistration(127.0.0.1:38395, datanodeUuid=21509893-4fc1-4cac-80d7-7665318de056, infoPort=45843, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=1795435365;c=1733252104586), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:06,629 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x732608ed8ade1130 with lease ID 0xb44223e61aac57a4: Processing first storage report for DS-64d5e00a-6a4c-4344-9800-60588c29e606 from datanode DatanodeRegistration(127.0.0.1:38395, datanodeUuid=21509893-4fc1-4cac-80d7-7665318de056, infoPort=45843, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=1795435365;c=1733252104586) 2024-12-03T18:55:06,629 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x732608ed8ade1130 with lease ID 0xb44223e61aac57a4: from storage DS-64d5e00a-6a4c-4344-9800-60588c29e606 node DatanodeRegistration(127.0.0.1:38395, datanodeUuid=21509893-4fc1-4cac-80d7-7665318de056, infoPort=45843, infoSecurePort=0, ipcPort=44473, storageInfo=lv=-57;cid=testClusterID;nsid=1795435365;c=1733252104586), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:06,736 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/data/data3/current/BP-1452214034-172.17.0.2-1733252104586/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:06,736 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/data/data4/current/BP-1452214034-172.17.0.2-1733252104586/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:06,750 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:55:06,753 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48bf3d0632fcaca0 with lease ID 0xb44223e61aac57a5: Processing first storage report for DS-47e2e75b-66bb-4619-9d25-f28d9cb37692 from datanode DatanodeRegistration(127.0.0.1:37041, datanodeUuid=68f6f4d9-6929-4ccf-a34d-b00bc33c9b1e, infoPort=44039, infoSecurePort=0, ipcPort=33615, storageInfo=lv=-57;cid=testClusterID;nsid=1795435365;c=1733252104586) 2024-12-03T18:55:06,753 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48bf3d0632fcaca0 with lease ID 0xb44223e61aac57a5: from storage DS-47e2e75b-66bb-4619-9d25-f28d9cb37692 node DatanodeRegistration(127.0.0.1:37041, datanodeUuid=68f6f4d9-6929-4ccf-a34d-b00bc33c9b1e, infoPort=44039, infoSecurePort=0, ipcPort=33615, storageInfo=lv=-57;cid=testClusterID;nsid=1795435365;c=1733252104586), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T18:55:06,753 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48bf3d0632fcaca0 with lease ID 0xb44223e61aac57a5: Processing first storage report for DS-c0cb204f-088d-417d-ae0b-accb523ed749 from datanode DatanodeRegistration(127.0.0.1:37041, datanodeUuid=68f6f4d9-6929-4ccf-a34d-b00bc33c9b1e, infoPort=44039, infoSecurePort=0, ipcPort=33615, storageInfo=lv=-57;cid=testClusterID;nsid=1795435365;c=1733252104586) 2024-12-03T18:55:06,753 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48bf3d0632fcaca0 with lease ID 0xb44223e61aac57a5: from storage DS-c0cb204f-088d-417d-ae0b-accb523ed749 node DatanodeRegistration(127.0.0.1:37041, datanodeUuid=68f6f4d9-6929-4ccf-a34d-b00bc33c9b1e, infoPort=44039, infoSecurePort=0, ipcPort=33615, storageInfo=lv=-57;cid=testClusterID;nsid=1795435365;c=1733252104586), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:06,840 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8 2024-12-03T18:55:06,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/zookeeper_0, clientPort=49927, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T18:55:06,845 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49927 2024-12-03T18:55:06,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:06,847 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:06,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:55:06,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:55:06,858 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7 with version=8 2024-12-03T18:55:06,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase-staging 2024-12-03T18:55:06,861 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:55:06,861 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:06,861 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:06,861 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:55:06,861 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:06,861 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:55:06,861 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T18:55:06,861 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:55:06,862 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38949 2024-12-03T18:55:06,864 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38949 connecting to ZooKeeper ensemble=127.0.0.1:49927 2024-12-03T18:55:06,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389490x0, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:55:06,920 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38949-0x1019c8c46070000 connected 2024-12-03T18:55:07,001 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:07,007 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:07,012 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:07,012 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7, hbase.cluster.distributed=false 2024-12-03T18:55:07,014 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:55:07,015 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38949 2024-12-03T18:55:07,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38949 2024-12-03T18:55:07,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38949 2024-12-03T18:55:07,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38949 2024-12-03T18:55:07,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38949 2024-12-03T18:55:07,030 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:55:07,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:07,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:07,031 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:55:07,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:07,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:55:07,031 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T18:55:07,031 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:55:07,032 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36627 2024-12-03T18:55:07,034 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36627 connecting to ZooKeeper ensemble=127.0.0.1:49927 2024-12-03T18:55:07,035 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:07,038 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:07,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:366270x0, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:55:07,053 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:07,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36627-0x1019c8c46070001 connected 2024-12-03T18:55:07,054 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T18:55:07,054 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T18:55:07,055 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T18:55:07,056 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:55:07,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36627 2024-12-03T18:55:07,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36627 2024-12-03T18:55:07,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36627 2024-12-03T18:55:07,062 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36627 2024-12-03T18:55:07,062 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36627 2024-12-03T18:55:07,075 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db5a5ccf5be8:38949 2024-12-03T18:55:07,076 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:55:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:55:07,084 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:07,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T18:55:07,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,095 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T18:55:07,095 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db5a5ccf5be8,38949,1733252106860 from backup master directory 2024-12-03T18:55:07,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:07,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:55:07,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:55:07,105 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T18:55:07,105 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:07,109 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/hbase.id] with ID: 525b2ebb-0d8f-4aeb-81f2-96eb5c1096bd 2024-12-03T18:55:07,109 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/.tmp/hbase.id 2024-12-03T18:55:07,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:55:07,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:55:07,118 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/.tmp/hbase.id]:[hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/hbase.id] 2024-12-03T18:55:07,131 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:07,132 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T18:55:07,133 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
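The entries above record HBaseTestingUtil bringing up an embedded ZooKeeper quorum, a two-datanode mini HDFS, and the HMaster RPC and ZooKeeper plumbing, then writing the cluster ID file. For orientation, here is a minimal sketch of the kind of JUnit-style setup that produces output like this; the class and builder names follow my reading of the current HBaseTestingUtil API, and the counts passed to the builder are chosen to mirror what this run appears to start (one master, one region server, two datanodes) rather than being read from the test's source, so treat them as assumptions.

// Minimal sketch, assuming the HBaseTestingUtil / StartTestingClusterOption API
// of current HBase; node counts are illustrative, not taken from this test.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartTestingClusterOption;

public class MiniClusterSetupSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  public static void main(String[] args) throws Exception {
    // Starts the embedded ZooKeeper, the mini HDFS (the NameNode/DataNode Jetty
    // endpoints logged above), and an HMaster plus one RegionServer.
    StartTestingClusterOption option = StartTestingClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    TEST_UTIL.startMiniCluster(option);
    try {
      // Test logic would run here; hbase.rootdir, java.io.tmpdir and the other
      // properties listed earlier are already redirected to the per-test data dir.
    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }
}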
2024-12-03T18:55:07,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:55:07,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:55:07,155 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:55:07,156 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T18:55:07,157 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:55:07,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:55:07,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:55:07,166 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store 2024-12-03T18:55:07,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:55:07,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:55:07,175 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:07,175 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:55:07,176 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:07,176 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:07,176 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:55:07,176 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:07,176 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
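The table descriptor printed for 'master:store' above maps directly onto HBase's public descriptor-builder API. As a readability aid, here is a minimal sketch of how the logged 'info' family attributes (VERSIONS => '3', ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) would be expressed with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; this illustrates the logged settings only and is not the code path the master itself runs.

// Sketch: the 'info' family attributes from the master:store descriptor above,
// restated with the public descriptor-builder API.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))      // table 'master:store'
        .setColumnFamily(info)
        .build();
  }
}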
2024-12-03T18:55:07,176 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252107175Disabling compacts and flushes for region at 1733252107175Disabling writes for close at 1733252107176 (+1 ms)Writing region close event to WAL at 1733252107176Closed at 1733252107176 2024-12-03T18:55:07,177 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/.initializing 2024-12-03T18:55:07,178 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/WALs/db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:07,180 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C38949%2C1733252106860, suffix=, logDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/WALs/db5a5ccf5be8,38949,1733252106860, archiveDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/oldWALs, maxLogs=10 2024-12-03T18:55:07,181 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C38949%2C1733252106860.1733252107181 2024-12-03T18:55:07,188 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/WALs/db5a5ccf5be8,38949,1733252106860/db5a5ccf5be8%2C38949%2C1733252106860.1733252107181 2024-12-03T18:55:07,189 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45843:45843),(127.0.0.1/127.0.0.1:44039:44039)] 2024-12-03T18:55:07,189 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:55:07,190 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:07,190 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,190 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T18:55:07,193 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:07,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T18:55:07,196 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:55:07,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T18:55:07,199 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:55:07,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T18:55:07,202 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:55:07,203 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,203 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,204 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,206 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,206 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,206 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T18:55:07,208 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:07,210 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:55:07,211 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866835, jitterRate=0.10223814845085144}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T18:55:07,212 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733252107190Initializing all the Stores at 1733252107191 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252107191Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252107191Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252107191Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252107191Cleaning up temporary data from old regions at 1733252107206 (+15 ms)Region opened successfully at 1733252107212 (+6 ms) 2024-12-03T18:55:07,212 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T18:55:07,216 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@462939ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:55:07,217 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T18:55:07,217 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T18:55:07,217 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T18:55:07,217 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T18:55:07,218 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T18:55:07,218 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T18:55:07,218 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T18:55:07,221 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T18:55:07,222 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T18:55:07,231 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T18:55:07,231 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T18:55:07,232 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T18:55:07,241 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T18:55:07,242 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T18:55:07,244 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T18:55:07,252 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T18:55:07,253 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T18:55:07,262 DEBUG 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T18:55:07,265 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T18:55:07,273 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T18:55:07,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:07,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:07,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,285 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db5a5ccf5be8,38949,1733252106860, sessionid=0x1019c8c46070000, setting cluster-up flag (Was=false) 2024-12-03T18:55:07,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,337 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T18:55:07,340 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:07,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,389 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T18:55:07,394 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:07,397 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T18:55:07,401 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T18:55:07,401 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T18:55:07,401 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T18:55:07,401 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db5a5ccf5be8,38949,1733252106860 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T18:55:07,404 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:55:07,404 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:55:07,404 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:55:07,404 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:55:07,404 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db5a5ccf5be8:0, corePoolSize=10, maxPoolSize=10 2024-12-03T18:55:07,404 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,404 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:55:07,405 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T18:55:07,406 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733252137406 2024-12-03T18:55:07,406 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T18:55:07,407 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T18:55:07,407 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T18:55:07,407 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T18:55:07,407 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T18:55:07,407 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T18:55:07,407 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:55:07,407 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T18:55:07,407 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,408 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T18:55:07,408 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T18:55:07,408 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T18:55:07,408 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T18:55:07,408 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T18:55:07,409 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252107409,5,FailOnTimeoutGroup] 2024-12-03T18:55:07,409 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,409 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252107409,5,FailOnTimeoutGroup] 2024-12-03T18:55:07,409 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
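The cleaner chores initialized in the entries above (TimeToLiveLogCleaner, TimeToLiveHFileCleaner, HFileLinkCleaner, and friends) are plugged in as comma-separated delegate chains in configuration. A minimal sketch of setting them programmatically, assuming the property names used by recent HBase releases (treat them as assumptions and verify against your version's hbase-default.xml):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Delegate chains like the ones initialized above are supplied as
    // comma-separated class lists (assumed property names; check hbase-default.xml).
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
    // How long archived WALs are kept before TimeToLiveLogCleaner removes them (ms).
    conf.setLong("hbase.master.logcleaner.ttl", 600000L);
    System.out.println(conf.get("hbase.master.hfilecleaner.plugins"));
  }
}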
2024-12-03T18:55:07,409 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T18:55:07,409 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T18:55:07,409 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,409 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
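The hbase:meta descriptor printed above spells out each column family's settings (versions, bloom filter, block encoding, block size, in-memory flag). For orientation, here is a minimal sketch of how the same knobs are expressed through the public client API; the table name is illustrative, and hbase:meta itself is only ever created by the master, never by client code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // One family shaped like the 'info' family in the descriptor logged above:
    // 3 versions, in-memory, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    // "exampletable" is an illustrative name, not taken from this test run.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("exampletable"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}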
2024-12-03T18:55:07,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:55:07,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:55:07,418 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T18:55:07,418 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7 2024-12-03T18:55:07,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:55:07,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:55:07,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:07,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:55:07,434 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:55:07,434 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,435 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:07,435 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:55:07,436 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:55:07,437 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:07,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:55:07,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:55:07,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:07,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:55:07,442 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:55:07,442 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:07,442 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:07,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:55:07,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740 2024-12-03T18:55:07,444 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740 2024-12-03T18:55:07,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:55:07,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:55:07,446 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T18:55:07,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:55:07,450 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:55:07,451 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803162, jitterRate=0.021274447441101074}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:55:07,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733252107431Initializing all the Stores at 1733252107432 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252107432Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252107432Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252107432Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252107432Cleaning up temporary data from old regions at 1733252107446 (+14 ms)Region opened successfully at 1733252107452 (+6 ms) 2024-12-03T18:55:07,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:55:07,453 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:55:07,453 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:55:07,453 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:55:07,453 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:55:07,453 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:55:07,453 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252107452Disabling compacts and flushes for region at 1733252107452Disabling writes for close at 1733252107453 (+1 ms)Writing 
region close event to WAL at 1733252107453Closed at 1733252107453 2024-12-03T18:55:07,455 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:55:07,455 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T18:55:07,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T18:55:07,457 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:55:07,459 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T18:55:07,464 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(746): ClusterId : 525b2ebb-0d8f-4aeb-81f2-96eb5c1096bd 2024-12-03T18:55:07,464 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T18:55:07,474 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T18:55:07,474 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T18:55:07,485 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T18:55:07,485 DEBUG [RS:0;db5a5ccf5be8:36627 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16afaf9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:55:07,497 DEBUG [RS:0;db5a5ccf5be8:36627 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db5a5ccf5be8:36627 2024-12-03T18:55:07,497 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T18:55:07,497 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T18:55:07,497 DEBUG [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T18:55:07,498 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(2659): reportForDuty to master=db5a5ccf5be8,38949,1733252106860 with port=36627, startcode=1733252107030 2024-12-03T18:55:07,498 DEBUG [RS:0;db5a5ccf5be8:36627 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T18:55:07,501 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39599, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T18:55:07,502 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38949 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:07,502 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38949 {}] master.ServerManager(517): Registering regionserver=db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:07,504 DEBUG [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7 2024-12-03T18:55:07,504 DEBUG [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44241 2024-12-03T18:55:07,504 DEBUG [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T18:55:07,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:55:07,516 DEBUG [RS:0;db5a5ccf5be8:36627 {}] zookeeper.ZKUtil(111): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:07,516 WARN [RS:0;db5a5ccf5be8:36627 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:55:07,516 INFO [RS:0;db5a5ccf5be8:36627 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:55:07,516 DEBUG [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/WALs/db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:07,517 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db5a5ccf5be8,36627,1733252107030] 2024-12-03T18:55:07,521 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T18:55:07,524 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T18:55:07,524 INFO [RS:0;db5a5ccf5be8:36627 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:55:07,524 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
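The compaction throughput figures above (100 MB/s higher bound, 50 MB/s lower bound, 60000 ms tuning period) come from configuration. A minimal sketch of setting them, assuming the property names read by PressureAwareCompactionThroughputController; confirm them against your release before relying on them:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Bounds matching the 100 MB/s and 50 MB/s figures reported above, in bytes
    // per second (assumed property names; verify against your HBase version).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    // Re-tune the current limit this often; 60000 ms matches the log entry above.
    conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60000);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
  }
}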
2024-12-03T18:55:07,525 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T18:55:07,526 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T18:55:07,527 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,527 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,528 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:07,528 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:55:07,528 DEBUG [RS:0;db5a5ccf5be8:36627 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:55:07,528 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T18:55:07,528 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,528 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,528 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,528 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,529 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,36627,1733252107030-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:55:07,541 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T18:55:07,541 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,36627,1733252107030-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,541 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,541 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.Replication(171): db5a5ccf5be8,36627,1733252107030 started 2024-12-03T18:55:07,554 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:07,554 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1482): Serving as db5a5ccf5be8,36627,1733252107030, RpcServer on db5a5ccf5be8/172.17.0.2:36627, sessionid=0x1019c8c46070001 2024-12-03T18:55:07,555 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T18:55:07,555 DEBUG [RS:0;db5a5ccf5be8:36627 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:07,555 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,36627,1733252107030' 2024-12-03T18:55:07,555 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T18:55:07,555 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T18:55:07,556 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T18:55:07,556 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T18:55:07,556 DEBUG [RS:0;db5a5ccf5be8:36627 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:07,556 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,36627,1733252107030' 2024-12-03T18:55:07,556 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T18:55:07,557 DEBUG 
[RS:0;db5a5ccf5be8:36627 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T18:55:07,557 DEBUG [RS:0;db5a5ccf5be8:36627 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T18:55:07,557 INFO [RS:0;db5a5ccf5be8:36627 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T18:55:07,557 INFO [RS:0;db5a5ccf5be8:36627 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T18:55:07,609 WARN [db5a5ccf5be8:38949 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T18:55:07,662 INFO [RS:0;db5a5ccf5be8:36627 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C36627%2C1733252107030, suffix=, logDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/WALs/db5a5ccf5be8,36627,1733252107030, archiveDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/oldWALs, maxLogs=32 2024-12-03T18:55:07,667 INFO [RS:0;db5a5ccf5be8:36627 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C36627%2C1733252107030.1733252107666 2024-12-03T18:55:07,678 INFO [RS:0;db5a5ccf5be8:36627 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/WALs/db5a5ccf5be8,36627,1733252107030/db5a5ccf5be8%2C36627%2C1733252107030.1733252107666 2024-12-03T18:55:07,682 DEBUG [RS:0;db5a5ccf5be8:36627 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45843:45843),(127.0.0.1/127.0.0.1:44039:44039)] 2024-12-03T18:55:07,860 DEBUG [db5a5ccf5be8:38949 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T18:55:07,860 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:07,863 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,36627,1733252107030, state=OPENING 2024-12-03T18:55:07,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:55:07,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T18:55:07,878 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-03T18:55:07,947 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T18:55:07,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:07,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
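The ZKWatcher entries throughout this log (NodeCreated on /hbase/running, NodeDataChanged on /hbase/meta-region-server, NodeChildrenChanged on /hbase) are ordinary ZooKeeper watch notifications. A minimal stand-alone sketch of receiving the same kind of events against this quorum; only the quorum address is taken from the log, everything else is illustrative:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum address taken from the log above (127.0.0.1:49927).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49927", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      // Events like the NodeCreated/NodeDataChanged lines above arrive here.
      System.out.println(event.getType() + " " + event.getPath());
    });
    connected.await();

    // Registering an exists() watch is how a client learns when /hbase/running
    // appears, which is what the ZKWatcher entries above are reporting.
    zk.exists("/hbase/running", true);

    Thread.sleep(10_000); // keep the process alive long enough to see events
    zk.close();
  }
}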
2024-12-03T18:55:07,959 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:55:07,959 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:55:07,959 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:55:07,959 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,36627,1733252107030}] 2024-12-03T18:55:08,116 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T18:55:08,122 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55981, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T18:55:08,129 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T18:55:08,129 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:55:08,132 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C36627%2C1733252107030.meta, suffix=.meta, logDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/WALs/db5a5ccf5be8,36627,1733252107030, archiveDir=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/oldWALs, maxLogs=32 2024-12-03T18:55:08,135 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C36627%2C1733252107030.meta.1733252108135.meta 2024-12-03T18:55:08,141 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/WALs/db5a5ccf5be8,36627,1733252107030/db5a5ccf5be8%2C36627%2C1733252107030.meta.1733252108135.meta 2024-12-03T18:55:08,142 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45843:45843),(127.0.0.1/127.0.0.1:44039:44039)] 2024-12-03T18:55:08,143 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:55:08,143 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T18:55:08,143 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered 
coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T18:55:08,143 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T18:55:08,143 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T18:55:08,143 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:08,144 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T18:55:08,144 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T18:55:08,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:55:08,147 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:55:08,147 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:08,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:08,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:55:08,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:55:08,149 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:08,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:08,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:55:08,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:55:08,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:08,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:08,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:55:08,153 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:55:08,154 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:08,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:08,154 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:55:08,155 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740 2024-12-03T18:55:08,156 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740 2024-12-03T18:55:08,158 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:55:08,158 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:55:08,158 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T18:55:08,160 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:55:08,161 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835159, jitterRate=0.06196029484272003}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:55:08,161 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T18:55:08,162 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733252108144Writing region info on filesystem at 1733252108144Initializing all the Stores at 1733252108145 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252108145Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1733252108145Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252108146 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252108146Cleaning up temporary data from old regions at 1733252108158 (+12 ms)Running coprocessor post-open hooks at 1733252108161 (+3 ms)Region opened successfully at 1733252108162 (+1 ms) 2024-12-03T18:55:08,163 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733252108116 2024-12-03T18:55:08,166 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T18:55:08,166 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T18:55:08,167 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:08,168 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,36627,1733252107030, state=OPEN 2024-12-03T18:55:08,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:55:08,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:55:08,210 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:08,210 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:55:08,210 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:55:08,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T18:55:08,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,36627,1733252107030 in 251 msec 2024-12-03T18:55:08,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=2, resume processing ppid=1 2024-12-03T18:55:08,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 759 msec 2024-12-03T18:55:08,219 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:55:08,219 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T18:55:08,220 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:55:08,220 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,36627,1733252107030, seqNum=-1] 2024-12-03T18:55:08,221 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:55:08,222 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55727, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:55:08,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 829 msec 2024-12-03T18:55:08,230 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733252108230, completionTime=-1 2024-12-03T18:55:08,230 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T18:55:08,230 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T18:55:08,233 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T18:55:08,233 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733252168233 2024-12-03T18:55:08,233 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733252228233 2024-12-03T18:55:08,233 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T18:55:08,233 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38949,1733252106860-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:08,234 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38949,1733252106860-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:55:08,234 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38949,1733252106860-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:08,234 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db5a5ccf5be8:38949, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:08,234 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:08,234 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:08,236 DEBUG [master/db5a5ccf5be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T18:55:08,239 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.134sec 2024-12-03T18:55:08,240 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T18:55:08,240 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T18:55:08,240 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T18:55:08,240 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T18:55:08,240 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T18:55:08,240 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38949,1733252106860-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:55:08,240 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38949,1733252106860-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T18:55:08,243 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T18:55:08,243 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T18:55:08,243 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38949,1733252106860-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:55:08,265 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa3a1e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:55:08,265 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db5a5ccf5be8,38949,-1 for getting cluster id 2024-12-03T18:55:08,265 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T18:55:08,267 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '525b2ebb-0d8f-4aeb-81f2-96eb5c1096bd' 2024-12-03T18:55:08,268 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T18:55:08,268 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "525b2ebb-0d8f-4aeb-81f2-96eb5c1096bd" 2024-12-03T18:55:08,268 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d34ab95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:55:08,268 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db5a5ccf5be8,38949,-1] 2024-12-03T18:55:08,268 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T18:55:08,269 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:08,271 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45632, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T18:55:08,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47ca969, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:55:08,272 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:55:08,273 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,36627,1733252107030, seqNum=-1] 2024-12-03T18:55:08,274 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:55:08,276 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39760, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:55:08,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:08,278 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:08,281 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T18:55:08,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T18:55:08,282 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T18:55:08,282 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:08,282 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:08,282 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T18:55:08,282 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:08,283 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T18:55:08,283 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=216830630, stopped=false 2024-12-03T18:55:08,283 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db5a5ccf5be8,38949,1733252106860 2024-12-03T18:55:08,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:08,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:08,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:08,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:08,305 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:55:08,305 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T18:55:08,305 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:08,305 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:08,305 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:08,306 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:08,306 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db5a5ccf5be8,36627,1733252107030' ***** 2024-12-03T18:55:08,306 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T18:55:08,306 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T18:55:08,306 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T18:55:08,306 INFO [RS:0;db5a5ccf5be8:36627 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(959): stopping server db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db5a5ccf5be8:36627. 2024-12-03T18:55:08,307 DEBUG [RS:0;db5a5ccf5be8:36627 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:08,307 DEBUG [RS:0;db5a5ccf5be8:36627 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
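The call stacks above show where this shutdown originates: AbstractTestLogRolling.tearDown() invokes HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection, asks the master to shut the cluster down, and then stops the lone region server. A minimal sketch of that JUnit 4 setup/teardown pattern, assuming only the classes and methods named in the traces (the test class and field names below are illustrative, not taken from this log):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class ExampleMiniClusterTest {
      // Illustrative field; the real tests keep a shared HBaseTestingUtil instance.
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Brings up in-process ZooKeeper, HDFS and HBase ("Minicluster is up" above).
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Mirrors the stack traces above: closes the cluster connection and
        // stops the master and region server(s), then the mini DFS/ZK cluster.
        testUtil.shutdownMiniCluster();
      }
    }

TestLogRolling.testLogRollOnDatanodeDeath, named in the same traces, runs inside this lifecycle; only the teardown half is visible in this excerpt.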
2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T18:55:08,307 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T18:55:08,307 DEBUG [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T18:55:08,307 DEBUG [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T18:55:08,307 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:55:08,307 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:55:08,308 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:55:08,308 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:55:08,308 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:55:08,308 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-03T18:55:08,323 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740/.tmp/ns/4c5f2daf9421423fa9d1811093db5af5 is 43, key is default/ns:d/1733252108223/Put/seqid=0 2024-12-03T18:55:08,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741835_1011 (size=5153) 2024-12-03T18:55:08,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741835_1011 (size=5153) 2024-12-03T18:55:08,329 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740/.tmp/ns/4c5f2daf9421423fa9d1811093db5af5 2024-12-03T18:55:08,337 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740/.tmp/ns/4c5f2daf9421423fa9d1811093db5af5 as hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740/ns/4c5f2daf9421423fa9d1811093db5af5 2024-12-03T18:55:08,345 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740/ns/4c5f2daf9421423fa9d1811093db5af5, entries=2, sequenceid=6, filesize=5.0 K 2024-12-03T18:55:08,347 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false 2024-12-03T18:55:08,347 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T18:55:08,352 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T18:55:08,353 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:55:08,353 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:55:08,353 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252108307Running coprocessor pre-close hooks at 1733252108307Disabling compacts and flushes for region at 1733252108307Disabling writes for close at 1733252108308 (+1 ms)Obtaining lock to block concurrent updates at 1733252108308Preparing flush snapshotting stores in 1588230740 at 1733252108308Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733252108308Flushing stores of hbase:meta,,1.1588230740 at 1733252108309 (+1 ms)Flushing 1588230740/ns: creating writer at 1733252108309Flushing 1588230740/ns: appending metadata at 1733252108323 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733252108323Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c61e3e6: reopening flushed file at 1733252108336 (+13 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false at 1733252108347 (+11 ms)Writing region close event to WAL at 1733252108348 (+1 ms)Running coprocessor post-close hooks at 1733252108353 (+5 ms)Closed at 1733252108353 2024-12-03T18:55:08,353 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T18:55:08,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:08,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:08,508 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(976): stopping server db5a5ccf5be8,36627,1733252107030; all regions closed. 
2024-12-03T18:55:08,508 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,509 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,509 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,509 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,509 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741834_1010 (size=1152) 2024-12-03T18:55:08,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741834_1010 (size=1152) 2024-12-03T18:55:08,517 DEBUG [RS:0;db5a5ccf5be8:36627 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/oldWALs 2024-12-03T18:55:08,517 INFO [RS:0;db5a5ccf5be8:36627 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C36627%2C1733252107030.meta:.meta(num 1733252108135) 2024-12-03T18:55:08,518 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,518 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,518 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,518 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,518 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741833_1009 (size=93) 2024-12-03T18:55:08,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741833_1009 (size=93) 2024-12-03T18:55:08,522 DEBUG [RS:0;db5a5ccf5be8:36627 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/oldWALs 2024-12-03T18:55:08,523 INFO [RS:0;db5a5ccf5be8:36627 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C36627%2C1733252107030:(num 1733252107666) 2024-12-03T18:55:08,523 DEBUG [RS:0;db5a5ccf5be8:36627 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:08,523 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:55:08,523 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:55:08,523 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.ChoreService(370): Chore service for: regionserver/db5a5ccf5be8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T18:55:08,523 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:55:08,523 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T18:55:08,523 INFO [RS:0;db5a5ccf5be8:36627 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36627 2024-12-03T18:55:08,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db5a5ccf5be8,36627,1733252107030 2024-12-03T18:55:08,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:55:08,536 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:55:08,547 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db5a5ccf5be8,36627,1733252107030] 2024-12-03T18:55:08,557 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db5a5ccf5be8,36627,1733252107030 already deleted, retry=false 2024-12-03T18:55:08,558 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db5a5ccf5be8,36627,1733252107030 expired; onlineServers=0 2024-12-03T18:55:08,558 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db5a5ccf5be8,38949,1733252106860' ***** 2024-12-03T18:55:08,558 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T18:55:08,558 INFO [M:0;db5a5ccf5be8:38949 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:55:08,558 INFO [M:0;db5a5ccf5be8:38949 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:55:08,558 DEBUG [M:0;db5a5ccf5be8:38949 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T18:55:08,559 DEBUG [M:0;db5a5ccf5be8:38949 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T18:55:08,559 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T18:55:08,559 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252107409 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252107409,5,FailOnTimeoutGroup] 2024-12-03T18:55:08,559 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252107409 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252107409,5,FailOnTimeoutGroup] 2024-12-03T18:55:08,559 INFO [M:0;db5a5ccf5be8:38949 {}] hbase.ChoreService(370): Chore service for: master/db5a5ccf5be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T18:55:08,559 INFO [M:0;db5a5ccf5be8:38949 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:55:08,560 DEBUG [M:0;db5a5ccf5be8:38949 {}] master.HMaster(1795): Stopping service threads 2024-12-03T18:55:08,560 INFO [M:0;db5a5ccf5be8:38949 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T18:55:08,560 INFO [M:0;db5a5ccf5be8:38949 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:55:08,560 INFO [M:0;db5a5ccf5be8:38949 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T18:55:08,561 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T18:55:08,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T18:55:08,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:08,569 DEBUG [M:0;db5a5ccf5be8:38949 {}] zookeeper.ZKUtil(347): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T18:55:08,569 WARN [M:0;db5a5ccf5be8:38949 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T18:55:08,570 INFO [M:0;db5a5ccf5be8:38949 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/.lastflushedseqids 2024-12-03T18:55:08,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741836_1012 (size=99) 2024-12-03T18:55:08,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741836_1012 (size=99) 2024-12-03T18:55:08,580 INFO [M:0;db5a5ccf5be8:38949 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T18:55:08,580 INFO [M:0;db5a5ccf5be8:38949 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T18:55:08,580 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:55:08,580 INFO [M:0;db5a5ccf5be8:38949 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:08,580 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:08,580 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:55:08,580 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:08,581 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-03T18:55:08,598 DEBUG [M:0;db5a5ccf5be8:38949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a120653384f34bd2bdbe10a9aeb1fcff is 82, key is hbase:meta,,1/info:regioninfo/1733252108167/Put/seqid=0 2024-12-03T18:55:08,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741837_1013 (size=5672) 2024-12-03T18:55:08,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741837_1013 (size=5672) 2024-12-03T18:55:08,604 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a120653384f34bd2bdbe10a9aeb1fcff 2024-12-03T18:55:08,623 DEBUG [M:0;db5a5ccf5be8:38949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cf94c9def224018b0ca2cb73b183fe1 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733252108229/Put/seqid=0 2024-12-03T18:55:08,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741838_1014 (size=5275) 2024-12-03T18:55:08,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741838_1014 (size=5275) 2024-12-03T18:55:08,629 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cf94c9def224018b0ca2cb73b183fe1 2024-12-03T18:55:08,647 INFO [RS:0;db5a5ccf5be8:36627 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:55:08,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:08,647 INFO [RS:0;db5a5ccf5be8:36627 {}] regionserver.HRegionServer(1031): Exiting; stopping=db5a5ccf5be8,36627,1733252107030; zookeeper connection closed. 
2024-12-03T18:55:08,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36627-0x1019c8c46070001, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:08,648 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2975e0f5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2975e0f5 2024-12-03T18:55:08,648 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T18:55:08,650 DEBUG [M:0;db5a5ccf5be8:38949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/15fb1c6b939542418a4cedf873d6b885 is 69, key is db5a5ccf5be8,36627,1733252107030/rs:state/1733252107502/Put/seqid=0 2024-12-03T18:55:08,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741839_1015 (size=5156) 2024-12-03T18:55:08,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741839_1015 (size=5156) 2024-12-03T18:55:08,656 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/15fb1c6b939542418a4cedf873d6b885 2024-12-03T18:55:08,677 DEBUG [M:0;db5a5ccf5be8:38949 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c4843992eabf4ce7a33c0ca607b4858a is 52, key is load_balancer_on/state:d/1733252108280/Put/seqid=0 2024-12-03T18:55:08,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741840_1016 (size=5056) 2024-12-03T18:55:08,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741840_1016 (size=5056) 2024-12-03T18:55:08,682 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c4843992eabf4ce7a33c0ca607b4858a 2024-12-03T18:55:08,689 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a120653384f34bd2bdbe10a9aeb1fcff as hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a120653384f34bd2bdbe10a9aeb1fcff 2024-12-03T18:55:08,696 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a120653384f34bd2bdbe10a9aeb1fcff, entries=8, sequenceid=29, 
filesize=5.5 K 2024-12-03T18:55:08,697 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cf94c9def224018b0ca2cb73b183fe1 as hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5cf94c9def224018b0ca2cb73b183fe1 2024-12-03T18:55:08,704 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5cf94c9def224018b0ca2cb73b183fe1, entries=3, sequenceid=29, filesize=5.2 K 2024-12-03T18:55:08,705 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/15fb1c6b939542418a4cedf873d6b885 as hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/15fb1c6b939542418a4cedf873d6b885 2024-12-03T18:55:08,712 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/15fb1c6b939542418a4cedf873d6b885, entries=1, sequenceid=29, filesize=5.0 K 2024-12-03T18:55:08,714 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c4843992eabf4ce7a33c0ca607b4858a as hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c4843992eabf4ce7a33c0ca607b4858a 2024-12-03T18:55:08,720 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44241/user/jenkins/test-data/19e78551-52ce-f63a-82b9-a6abc14105f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c4843992eabf4ce7a33c0ca607b4858a, entries=1, sequenceid=29, filesize=4.9 K 2024-12-03T18:55:08,721 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false 2024-12-03T18:55:08,723 INFO [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T18:55:08,723 DEBUG [M:0;db5a5ccf5be8:38949 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252108580Disabling compacts and flushes for region at 1733252108580Disabling writes for close at 1733252108580Obtaining lock to block concurrent updates at 1733252108581 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733252108581Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733252108581Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733252108582 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733252108582Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733252108598 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733252108598Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733252108609 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733252108623 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733252108623Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733252108634 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733252108650 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733252108650Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733252108662 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733252108676 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733252108676Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67954a4e: reopening flushed file at 1733252108688 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1536745b: reopening flushed file at 1733252108696 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69a82984: reopening flushed file at 1733252108704 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@752f0bd: reopening flushed file at 1733252108712 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false at 1733252108721 (+9 ms)Writing region close event to WAL at 1733252108723 (+2 ms)Closed at 1733252108723 2024-12-03T18:55:08,723 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,723 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,724 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,724 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,724 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:08,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37041 is added to blk_1073741830_1006 (size=10311) 2024-12-03T18:55:08,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38395 is added to blk_1073741830_1006 (size=10311) 2024-12-03T18:55:08,726 INFO [M:0;db5a5ccf5be8:38949 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-03T18:55:08,726 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T18:55:08,726 INFO [M:0;db5a5ccf5be8:38949 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38949 2024-12-03T18:55:08,726 INFO [M:0;db5a5ccf5be8:38949 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:55:08,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:08,837 INFO [M:0;db5a5ccf5be8:38949 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:55:08,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38949-0x1019c8c46070000, quorum=127.0.0.1:49927, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:08,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75434f63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:08,844 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a2066f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:08,844 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:08,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@aab268d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:08,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ce533a5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:08,848 WARN [BP-1452214034-172.17.0.2-1733252104586 heartbeating to localhost/127.0.0.1:44241 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:08,848 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:55:08,848 WARN [BP-1452214034-172.17.0.2-1733252104586 heartbeating to localhost/127.0.0.1:44241 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1452214034-172.17.0.2-1733252104586 (Datanode Uuid 68f6f4d9-6929-4ccf-a34d-b00bc33c9b1e) service to localhost/127.0.0.1:44241 2024-12-03T18:55:08,848 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:08,848 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/data/data3/current/BP-1452214034-172.17.0.2-1733252104586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:08,849 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/data/data4/current/BP-1452214034-172.17.0.2-1733252104586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:08,849 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:08,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d69c419{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:08,851 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f2378c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:08,851 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:08,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7517d9e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:08,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e23c0c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:08,852 WARN [BP-1452214034-172.17.0.2-1733252104586 heartbeating to localhost/127.0.0.1:44241 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:08,852 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:55:08,852 WARN [BP-1452214034-172.17.0.2-1733252104586 heartbeating to localhost/127.0.0.1:44241 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1452214034-172.17.0.2-1733252104586 (Datanode Uuid 21509893-4fc1-4cac-80d7-7665318de056) service to localhost/127.0.0.1:44241 2024-12-03T18:55:08,852 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:08,852 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/data/data1/current/BP-1452214034-172.17.0.2-1733252104586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:08,853 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/cluster_5a032f18-ccbd-67a6-bac0-464d99c54e3b/data/data2/current/BP-1452214034-172.17.0.2-1733252104586 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:08,853 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:08,857 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d95bc23{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:55:08,857 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78fa6004{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:08,857 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:08,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a1c2a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:08,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f841e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:08,863 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T18:55:08,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T18:55:08,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T18:55:08,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.log.dir so I do NOT create it in target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc 2024-12-03T18:55:08,880 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a25f0302-218a-bd59-7bd3-7631b6dc06d8/hadoop.tmp.dir so I do NOT create it in target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc 2024-12-03T18:55:08,881 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479, deleteOnExit=true 2024-12-03T18:55:08,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T18:55:08,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/test.cache.data in system properties and HBase conf 2024-12-03T18:55:08,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T18:55:08,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir in system properties and HBase conf 2024-12-03T18:55:08,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T18:55:08,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T18:55:08,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T18:55:08,881 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/nfs.dump.dir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/java.io.tmpdir in system properties and HBase conf 2024-12-03T18:55:08,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:55:08,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T18:55:08,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T18:55:08,894 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:55:08,973 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:55:08,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:08,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:08,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:08,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:09,198 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:09,202 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:09,204 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:09,204 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:09,204 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:55:09,205 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:09,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac253d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:09,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e26ba04{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:09,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75096fee{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/java.io.tmpdir/jetty-localhost-41617-hadoop-hdfs-3_4_1-tests_jar-_-any-12414465051171218250/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:55:09,295 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@346c0162{HTTP/1.1, (http/1.1)}{localhost:41617} 2024-12-03T18:55:09,295 INFO [Time-limited test {}] server.Server(415): Started @107172ms 2024-12-03T18:55:09,305 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:55:09,529 INFO [regionserver/db5a5ccf5be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:55:09,548 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:09,552 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:09,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:09,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:09,553 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:55:09,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a18c5e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:09,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4743e1dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:09,644 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a2936a3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/java.io.tmpdir/jetty-localhost-41733-hadoop-hdfs-3_4_1-tests_jar-_-any-9448251582044116494/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:09,645 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a4bf55{HTTP/1.1, (http/1.1)}{localhost:41733} 2024-12-03T18:55:09,645 INFO [Time-limited test {}] server.Server(415): Started @107521ms 2024-12-03T18:55:09,646 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:09,673 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:09,676 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:09,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:09,677 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:09,677 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:55:09,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c64d82b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:09,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e10767c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:09,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@abbe752{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/java.io.tmpdir/jetty-localhost-34275-hadoop-hdfs-3_4_1-tests_jar-_-any-13304764305405606134/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:09,770 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42443481{HTTP/1.1, (http/1.1)}{localhost:34275} 2024-12-03T18:55:09,771 INFO [Time-limited test {}] server.Server(415): Started @107647ms 2024-12-03T18:55:09,772 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:10,810 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data1/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:10,810 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data2/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:10,830 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:55:10,833 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7386ddac67dc1672 with lease ID 0xe29c90a44304063a: Processing first storage report for DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff from datanode DatanodeRegistration(127.0.0.1:36129, datanodeUuid=a59547c2-5240-4f9b-b61e-f7cc2e5e9d66, infoPort=34495, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:10,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7386ddac67dc1672 with lease ID 0xe29c90a44304063a: from storage DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff node DatanodeRegistration(127.0.0.1:36129, datanodeUuid=a59547c2-5240-4f9b-b61e-f7cc2e5e9d66, infoPort=34495, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:10,833 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7386ddac67dc1672 with lease ID 0xe29c90a44304063a: Processing first storage report for DS-edca5b81-69ce-4559-a323-e2d85db33a02 from datanode DatanodeRegistration(127.0.0.1:36129, datanodeUuid=a59547c2-5240-4f9b-b61e-f7cc2e5e9d66, infoPort=34495, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:10,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7386ddac67dc1672 with lease ID 0xe29c90a44304063a: from storage DS-edca5b81-69ce-4559-a323-e2d85db33a02 node DatanodeRegistration(127.0.0.1:36129, datanodeUuid=a59547c2-5240-4f9b-b61e-f7cc2e5e9d66, infoPort=34495, infoSecurePort=0, ipcPort=44411, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:10,947 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data3/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:10,947 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data4/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:10,963 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:55:10,966 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd6127f90c00b1a1c with lease ID 0xe29c90a44304063b: Processing first storage report for DS-faa8c47c-a6f5-4610-9c09-f1450308231b from datanode DatanodeRegistration(127.0.0.1:40429, datanodeUuid=ff2ed10a-1a62-4254-b7ab-13ac5358207a, infoPort=38647, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:10,966 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd6127f90c00b1a1c with lease ID 0xe29c90a44304063b: from storage DS-faa8c47c-a6f5-4610-9c09-f1450308231b node DatanodeRegistration(127.0.0.1:40429, datanodeUuid=ff2ed10a-1a62-4254-b7ab-13ac5358207a, infoPort=38647, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T18:55:10,966 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd6127f90c00b1a1c with lease ID 0xe29c90a44304063b: Processing first storage report for DS-73705ed8-497c-456d-9e1b-d162725e229f from datanode DatanodeRegistration(127.0.0.1:40429, datanodeUuid=ff2ed10a-1a62-4254-b7ab-13ac5358207a, infoPort=38647, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:10,966 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd6127f90c00b1a1c with lease ID 0xe29c90a44304063b: from storage DS-73705ed8-497c-456d-9e1b-d162725e229f node DatanodeRegistration(127.0.0.1:40429, datanodeUuid=ff2ed10a-1a62-4254-b7ab-13ac5358207a, infoPort=38647, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:11,012 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc 2024-12-03T18:55:11,016 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/zookeeper_0, clientPort=53957, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T18:55:11,017 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53957 2024-12-03T18:55:11,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:11,019 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:11,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:55:11,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:55:11,029 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6 with version=8 2024-12-03T18:55:11,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase-staging 2024-12-03T18:55:11,031 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:55:11,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:11,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:11,031 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:55:11,032 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:11,032 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:55:11,032 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T18:55:11,032 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:55:11,033 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39725 2024-12-03T18:55:11,034 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39725 connecting to ZooKeeper ensemble=127.0.0.1:53957 2024-12-03T18:55:11,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:397250x0, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:55:11,096 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39725-0x1019c8c56530000 connected 2024-12-03T18:55:11,179 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:11,180 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:11,183 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:11,183 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6, hbase.cluster.distributed=false 2024-12-03T18:55:11,185 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:55:11,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39725 2024-12-03T18:55:11,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39725 2024-12-03T18:55:11,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39725 2024-12-03T18:55:11,187 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39725 2024-12-03T18:55:11,187 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39725 2024-12-03T18:55:11,199 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:55:11,200 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:11,200 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:11,200 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:55:11,200 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:11,200 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:55:11,200 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T18:55:11,200 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:55:11,201 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46359 2024-12-03T18:55:11,202 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46359 connecting to ZooKeeper ensemble=127.0.0.1:53957 2024-12-03T18:55:11,202 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:11,204 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:11,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:463590x0, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:55:11,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:463590x0, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:11,210 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46359-0x1019c8c56530001 connected 2024-12-03T18:55:11,211 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T18:55:11,211 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T18:55:11,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T18:55:11,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:55:11,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46359 2024-12-03T18:55:11,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46359 2024-12-03T18:55:11,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46359 2024-12-03T18:55:11,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46359 2024-12-03T18:55:11,218 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46359 2024-12-03T18:55:11,234 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db5a5ccf5be8:39725 2024-12-03T18:55:11,235 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:11,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:55:11,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:55:11,242 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:11,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T18:55:11,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,253 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T18:55:11,253 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db5a5ccf5be8,39725,1733252111031 from backup master directory 2024-12-03T18:55:11,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:55:11,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:11,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:55:11,263 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
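The records above show HBase's ZKWatcher registering watches on znodes that do not yet exist (/hbase/master, /hbase/running) and later receiving NodeCreated and NodeChildrenChanged events once the active master writes them. A minimal sketch of that exists-watch pattern using the stock org.apache.zookeeper client is shown below; the class name is hypothetical, the connect string and znode path are taken from the log, and HBase itself goes through its own ZKUtil/ZKWatcher wrappers rather than this raw API.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Hypothetical illustration of the "set watcher on znode that does not yet exist"
// pattern visible in the log; not the code path HBase actually runs.
public class ExistsWatchExample {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);

    // Connect string matches the MiniZooKeeperCluster client port from the log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53957", 30000, event -> {
      // Session-level events (SyncConnected, Expired, ...) arrive here.
    });

    // exists() registers the watch even though /hbase/running is absent; the
    // watch fires once with NodeCreated when another client creates the node.
    zk.exists("/hbase/running", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        created.countDown();
      }
    });

    created.await(); // unblocks once the active master creates /hbase/running
    zk.close();
  }
}
```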
2024-12-03T18:55:11,263 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:11,267 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/hbase.id] with ID: 7eae729e-84d3-46b0-b5d0-e27bc66eedf5 2024-12-03T18:55:11,267 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/.tmp/hbase.id 2024-12-03T18:55:11,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:55:11,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:55:11,275 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/.tmp/hbase.id]:[hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/hbase.id] 2024-12-03T18:55:11,287 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:11,287 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T18:55:11,289 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
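The cluster ID records above follow the usual write-to-a-temporary-file-then-rename pattern: the ID is written under .tmp/hbase.id and then moved to hbase.id so readers never see a partially written file. A rough sketch of that pattern with the plain org.apache.hadoop.fs.FileSystem API follows; the class name is hypothetical, while the namenode address, paths and UUID are copied from the log (HBase's FSUtils helper is what actually ran here).

```java
import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: write the cluster ID to a temp file, then rename it into place.
public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37681"), conf);

    Path tmp = new Path("/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/hbase.id");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("7eae729e-84d3-46b0-b5d0-e27bc66eedf5".getBytes(StandardCharsets.UTF_8));
    }

    // rename() within one HDFS namespace is atomic, which is what makes the
    // temp-then-move sequence safe for concurrent readers.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```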
2024-12-03T18:55:11,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:55:11,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:55:11,302 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:55:11,302 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T18:55:11,303 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:55:11,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:55:11,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:55:11,313 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store 2024-12-03T18:55:11,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:55:11,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:55:11,723 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:11,724 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:55:11,724 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:11,725 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:11,725 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:55:11,725 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:11,725 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
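The dump above lists the four column families of the local master:store region (info, proc, rs, state) together with their version count, bloom filter, encoding, block size and in-memory settings. As a hedged illustration, the snippet below declares a family with the same settings as 'info' through the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API; the class and table names are made up, and the actual descriptor in the log is assembled internally by MasterRegion rather than by user code.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative descriptor mirroring the 'info' family from the log: 3 versions,
// ROWCOL bloom filter, ROW_INDEX_V1 encoding, in-memory, 8 KB block size.
public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example")) // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        .build();
  }
}
```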
2024-12-03T18:55:11,725 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252111724Disabling compacts and flushes for region at 1733252111724Disabling writes for close at 1733252111725 (+1 ms)Writing region close event to WAL at 1733252111725Closed at 1733252111725 2024-12-03T18:55:11,729 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/.initializing 2024-12-03T18:55:11,729 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:11,735 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C39725%2C1733252111031, suffix=, logDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031, archiveDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/oldWALs, maxLogs=10 2024-12-03T18:55:11,735 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 2024-12-03T18:55:11,740 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 2024-12-03T18:55:11,741 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38647:38647),(127.0.0.1/127.0.0.1:34495:34495)] 2024-12-03T18:55:11,741 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:55:11,742 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:11,742 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,742 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T18:55:11,745 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:11,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:11,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T18:55:11,747 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:11,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:55:11,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,749 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T18:55:11,750 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:11,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:55:11,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,752 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T18:55:11,752 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:11,752 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:55:11,753 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,753 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,754 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,755 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,755 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,756 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T18:55:11,757 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:55:11,759 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:55:11,760 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828146, jitterRate=0.053042516112327576}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T18:55:11,761 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733252111742Initializing all the Stores at 1733252111743 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252111743Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252111743Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252111743Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252111743Cleaning up temporary data from old regions at 1733252111755 (+12 ms)Region opened successfully at 1733252111760 (+5 ms) 2024-12-03T18:55:11,761 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T18:55:11,764 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23371dcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:55:11,765 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T18:55:11,765 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T18:55:11,765 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T18:55:11,765 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T18:55:11,766 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T18:55:11,766 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T18:55:11,766 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T18:55:11,769 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T18:55:11,769 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T18:55:11,820 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T18:55:11,821 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T18:55:11,822 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T18:55:11,831 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T18:55:11,831 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T18:55:11,832 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T18:55:11,841 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T18:55:11,843 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T18:55:11,852 DEBUG 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T18:55:11,854 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T18:55:11,862 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T18:55:11,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:11,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:11,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,874 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db5a5ccf5be8,39725,1733252111031, sessionid=0x1019c8c56530000, setting cluster-up flag (Was=false) 2024-12-03T18:55:11,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,926 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T18:55:11,927 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:11,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:11,978 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T18:55:11,980 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:11,983 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T18:55:11,986 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T18:55:11,986 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T18:55:11,986 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T18:55:11,987 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db5a5ccf5be8,39725,1733252111031 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T18:55:11,989 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:55:11,989 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:55:11,989 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:55:11,989 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:55:11,989 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db5a5ccf5be8:0, corePoolSize=10, maxPoolSize=10 2024-12-03T18:55:11,989 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:11,990 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:55:11,990 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T18:55:11,990 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733252141990 2024-12-03T18:55:11,991 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T18:55:11,991 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T18:55:11,991 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T18:55:11,991 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T18:55:11,991 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T18:55:11,991 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T18:55:11,991 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:11,992 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T18:55:11,992 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:55:11,992 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T18:55:11,992 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T18:55:11,992 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T18:55:11,992 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T18:55:11,992 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T18:55:11,993 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252111992,5,FailOnTimeoutGroup] 2024-12-03T18:55:11,993 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252111993,5,FailOnTimeoutGroup] 2024-12-03T18:55:11,993 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:11,993 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T18:55:11,993 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:11,993 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:11,993 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:11,993 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T18:55:12,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:55:12,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:55:12,003 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T18:55:12,003 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6 2024-12-03T18:55:12,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:55:12,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:55:12,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:12,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:55:12,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:55:12,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:12,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:55:12,014 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:55:12,014 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:12,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:55:12,016 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:55:12,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:12,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:55:12,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:55:12,017 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:12,018 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:55:12,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740 2024-12-03T18:55:12,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740 2024-12-03T18:55:12,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:55:12,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:55:12,021 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
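The FlushLargeStoresPolicy entries above note that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the master:store or hbase:meta descriptors, so the per-family flush lower bound falls back to the region's memstore flush heap size divided by the number of column families (four families in each case, which is where the 32.0 M and 16.0 M figures come from). Below is a minimal sketch of setting that attribute explicitly on a table descriptor with the 2.x Java client; the table name "demo", its families, and the 16 MB value are invented for illustration and are not taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBound {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Hypothetical table "demo" with two families. The lower bound is kept
      // as a table-descriptor attribute; when it is absent (as in the entries
      // above) FlushLargeStoresPolicy falls back to flush size / #families.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
              String.valueOf(16L * 1024 * 1024))   // 16 MB, illustrative only
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("b"))
          .build());
    }
  }
}

Left unset, the fallback shown in the log is usually adequate; the attribute mainly matters when a table's families see very uneven write volumes and only the large stores should be flushed.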
2024-12-03T18:55:12,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:55:12,022 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(746): ClusterId : 7eae729e-84d3-46b0-b5d0-e27bc66eedf5 2024-12-03T18:55:12,022 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T18:55:12,024 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:55:12,025 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775551, jitterRate=-0.013836011290550232}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:55:12,025 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733252112010Initializing all the Stores at 1733252112010Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252112011 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252112011Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252112011Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252112011Cleaning up temporary data from old regions at 1733252112020 (+9 ms)Region opened successfully at 1733252112025 (+5 ms) 2024-12-03T18:55:12,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:55:12,026 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:55:12,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:55:12,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:55:12,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:55:12,026 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-12-03T18:55:12,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252112026Disabling compacts and flushes for region at 1733252112026Disabling writes for close at 1733252112026Writing region close event to WAL at 1733252112026Closed at 1733252112026 2024-12-03T18:55:12,028 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:55:12,028 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T18:55:12,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T18:55:12,029 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:55:12,030 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T18:55:12,031 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T18:55:12,032 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T18:55:12,042 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T18:55:12,043 DEBUG [RS:0;db5a5ccf5be8:46359 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@753efdb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:55:12,058 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db5a5ccf5be8:46359 2024-12-03T18:55:12,058 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T18:55:12,058 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T18:55:12,058 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(832): About to register with Master. 
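Each region open above ends by printing the split-policy chain that was resolved for it (SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy, with a desiredMaxFileSize and jitterRate). As a rough sketch of where those inputs come from, the policy class and size cap can be set cluster-wide or per table; the key, class, and method names below are as I recall them from the 2.x client, and the 10 GB figure is illustrative rather than the deliberately tiny sizes this test uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SplitPolicyConfig {
  public static void main(String[] args) {
    // Cluster-wide default split policy and region size cap; these feed the
    // policy chain printed at region open time.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
    conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024); // 10 GB

    // The same pair can be overridden per table on the descriptor
    // (hypothetical table "demo"):
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setRegionSplitPolicyClassName(
            "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
        .setMaxFileSize(10L * 1024 * 1024 * 1024)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
    System.out.println(td.getTableName() + " uses " + td.getRegionSplitPolicyClassName());
  }
}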
2024-12-03T18:55:12,059 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(2659): reportForDuty to master=db5a5ccf5be8,39725,1733252111031 with port=46359, startcode=1733252111199 2024-12-03T18:55:12,059 DEBUG [RS:0;db5a5ccf5be8:46359 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T18:55:12,061 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50391, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T18:55:12,061 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39725 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,061 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39725 {}] master.ServerManager(517): Registering regionserver=db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,063 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6 2024-12-03T18:55:12,063 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37681 2024-12-03T18:55:12,063 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T18:55:12,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:55:12,074 DEBUG [RS:0;db5a5ccf5be8:46359 {}] zookeeper.ZKUtil(111): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,074 WARN [RS:0;db5a5ccf5be8:46359 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:55:12,074 INFO [RS:0;db5a5ccf5be8:46359 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:55:12,074 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,074 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db5a5ccf5be8,46359,1733252111199] 2024-12-03T18:55:12,078 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T18:55:12,080 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T18:55:12,081 INFO [RS:0;db5a5ccf5be8:46359 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:55:12,081 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
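The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. In 2.x both values are derived from heap fractions rather than absolute sizes, and 836 M is consistent with the default 0.95 lower-limit factor (0.95 x 880 M, on roughly a 2.2 GB heap at the default 0.4 upper fraction). A minimal sketch follows, using the key names as I recall them for HBase 2.x; verify them against the version actually deployed before relying on this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class GlobalMemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap reserved for all memstores, and the
    // fraction of that limit at which flushing starts before writes block.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    System.out.println("upper fraction = "
        + conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
  }
}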
2024-12-03T18:55:12,082 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T18:55:12,083 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T18:55:12,083 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,083 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,083 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,083 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,083 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,083 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:55:12,084 DEBUG [RS:0;db5a5ccf5be8:46359 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:55:12,084 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T18:55:12,085 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,085 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,085 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,085 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,085 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,46359,1733252111199-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:55:12,097 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T18:55:12,097 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,46359,1733252111199-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,097 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,097 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.Replication(171): db5a5ccf5be8,46359,1733252111199 started 2024-12-03T18:55:12,131 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,131 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1482): Serving as db5a5ccf5be8,46359,1733252111199, RpcServer on db5a5ccf5be8/172.17.0.2:46359, sessionid=0x1019c8c56530001 2024-12-03T18:55:12,131 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T18:55:12,131 DEBUG [RS:0;db5a5ccf5be8:46359 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,131 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,46359,1733252111199' 2024-12-03T18:55:12,131 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T18:55:12,132 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T18:55:12,133 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T18:55:12,133 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T18:55:12,133 DEBUG [RS:0;db5a5ccf5be8:46359 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,133 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,46359,1733252111199' 2024-12-03T18:55:12,133 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T18:55:12,133 DEBUG 
[RS:0;db5a5ccf5be8:46359 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T18:55:12,134 DEBUG [RS:0;db5a5ccf5be8:46359 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T18:55:12,134 INFO [RS:0;db5a5ccf5be8:46359 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T18:55:12,134 INFO [RS:0;db5a5ccf5be8:46359 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T18:55:12,181 WARN [db5a5ccf5be8:39725 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T18:55:12,237 INFO [RS:0;db5a5ccf5be8:46359 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C46359%2C1733252111199, suffix=, logDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199, archiveDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs, maxLogs=32 2024-12-03T18:55:12,240 INFO [RS:0;db5a5ccf5be8:46359 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 2024-12-03T18:55:12,250 INFO [RS:0;db5a5ccf5be8:46359 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 2024-12-03T18:55:12,253 DEBUG [RS:0;db5a5ccf5be8:46359 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34495:34495),(127.0.0.1/127.0.0.1:38647:38647)] 2024-12-03T18:55:12,431 DEBUG [db5a5ccf5be8:39725 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T18:55:12,432 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,433 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,46359,1733252111199, state=OPENING 2024-12-03T18:55:12,483 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T18:55:12,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:12,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:12,495 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:55:12,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,46359,1733252111199}] 2024-12-03T18:55:12,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-12-03T18:55:12,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:55:12,649 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T18:55:12,651 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58559, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T18:55:12,658 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T18:55:12,658 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:55:12,662 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C46359%2C1733252111199.meta, suffix=.meta, logDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199, archiveDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs, maxLogs=32 2024-12-03T18:55:12,663 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta 2024-12-03T18:55:12,669 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta 2024-12-03T18:55:12,670 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34495:34495),(127.0.0.1/127.0.0.1:38647:38647)] 2024-12-03T18:55:12,672 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:55:12,672 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T18:55:12,672 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T18:55:12,672 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
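The ZKWatcher entries above show the master publishing the hbase:meta location under /hbase/meta-region-server, with both the master and region server watchers receiving the NodeCreated/NodeDataChanged events that MetaRegionLocationCache reacts to. The sketch below watches the same znode with the plain ZooKeeper client; the quorum address 127.0.0.1:53957 is copied from this run and would differ per test, and because the payload is a protobuf-serialized server name with a magic prefix it is only fetched here as raw bytes (real clients resolve meta through the HBase client API instead).

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MetaZnodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:53957", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      if (event.getType() == Watcher.Event.EventType.NodeDataChanged
          && "/hbase/meta-region-server".equals(event.getPath())) {
        System.out.println("meta location changed");
      }
    });
    connected.await();
    Stat stat = new Stat();
    // Payload is an opaque protobuf blob here; read only its metadata and size.
    byte[] data = zk.getData("/hbase/meta-region-server", true, stat);
    System.out.println("znode version=" + stat.getVersion() + ", bytes=" + data.length);
    zk.close();
  }
}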
2024-12-03T18:55:12,672 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T18:55:12,672 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:12,673 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T18:55:12,673 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T18:55:12,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:55:12,676 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:55:12,677 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,677 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:12,677 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:55:12,678 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:55:12,678 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:12,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:55:12,680 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:55:12,680 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:55:12,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:55:12,682 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:55:12,682 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
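The CompactionConfiguration line repeated for every column family above prints the effective settings fed to ExploringCompactionPolicy: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), a 7-day major compaction period with 0.5 jitter, and so on. The sketch below maps those logged values to the configuration keys they come from, using names as I recall them for HBase 2.x (verify against the deployed version); the values simply restate the defaults shown in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize 128 MB
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period, 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
    System.out.println("compaction.min = "
        + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}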
2024-12-03T18:55:12,683 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:55:12,684 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740 2024-12-03T18:55:12,686 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740 2024-12-03T18:55:12,687 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:55:12,687 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:55:12,688 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T18:55:12,690 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:55:12,691 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828139, jitterRate=0.05303385853767395}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:55:12,691 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T18:55:12,692 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733252112673Writing region info on filesystem at 1733252112673Initializing all the Stores at 1733252112674 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252112674Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252112675 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252112675Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252112675Cleaning up temporary data from old regions at 1733252112687 (+12 ms)Running coprocessor post-open hooks at 1733252112691 (+4 ms)Region opened successfully at 1733252112692 (+1 ms) 2024-12-03T18:55:12,693 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733252112648 2024-12-03T18:55:12,697 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T18:55:12,697 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T18:55:12,698 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,699 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,46359,1733252111199, state=OPEN 2024-12-03T18:55:12,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:55:12,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:55:12,735 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:12,735 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:55:12,735 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:55:12,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T18:55:12,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,46359,1733252111199 in 240 msec 2024-12-03T18:55:12,742 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T18:55:12,743 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 711 msec 2024-12-03T18:55:12,744 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:55:12,744 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T18:55:12,746 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:55:12,746 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,46359,1733252111199, seqNum=-1] 2024-12-03T18:55:12,746 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:55:12,748 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36505, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:55:12,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 769 msec 2024-12-03T18:55:12,755 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733252112755, completionTime=-1 2024-12-03T18:55:12,755 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T18:55:12,755 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T18:55:12,758 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T18:55:12,758 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733252172758 2024-12-03T18:55:12,758 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733252232758 2024-12-03T18:55:12,758 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T18:55:12,758 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,39725,1733252111031-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,758 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,39725,1733252111031-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,758 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,39725,1733252111031-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,758 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db5a5ccf5be8:39725, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:55:12,759 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,759 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,761 DEBUG [master/db5a5ccf5be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T18:55:12,763 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.500sec 2024-12-03T18:55:12,763 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T18:55:12,763 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T18:55:12,763 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T18:55:12,764 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T18:55:12,764 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T18:55:12,764 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,39725,1733252111031-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:55:12,764 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,39725,1733252111031-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T18:55:12,767 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T18:55:12,767 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T18:55:12,767 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,39725,1733252111031-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
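The entries that follow show the test client locating the freshly initialized master via the connection registry and fetching the cluster id. A minimal sketch of obtaining an equivalent client connection with the public HBase API, assuming a ZooKeeper-based registry; the test itself goes through HBaseTestingUtil and its minicluster configuration rather than code like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and port mirror the values visible in this log (127.0.0.1:53957).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "53957");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }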
2024-12-03T18:55:12,829 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2be29fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:55:12,829 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db5a5ccf5be8,39725,-1 for getting cluster id 2024-12-03T18:55:12,830 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T18:55:12,831 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7eae729e-84d3-46b0-b5d0-e27bc66eedf5' 2024-12-03T18:55:12,831 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T18:55:12,832 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7eae729e-84d3-46b0-b5d0-e27bc66eedf5" 2024-12-03T18:55:12,832 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f9ef2e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:55:12,832 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db5a5ccf5be8,39725,-1] 2024-12-03T18:55:12,832 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T18:55:12,833 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:12,835 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57750, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T18:55:12,836 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19f5a74a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:55:12,836 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:55:12,837 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,46359,1733252111199, seqNum=-1] 2024-12-03T18:55:12,837 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:55:12,839 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49154, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:55:12,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:12,842 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:12,845 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T18:55:12,866 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:55:12,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:12,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:12,866 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:55:12,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:55:12,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:55:12,866 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T18:55:12,866 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:55:12,867 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44293 2024-12-03T18:55:12,869 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44293 connecting to ZooKeeper ensemble=127.0.0.1:53957 2024-12-03T18:55:12,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:12,873 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:55:12,894 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:442930x0, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:55:12,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:442930x0, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-03T18:55:12,895 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-03T18:55:12,895 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44293-0x1019c8c56530002 connected 2024-12-03T18:55:12,896 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T18:55:12,896 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T18:55:12,897 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T18:55:12,898 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:55:12,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44293 2024-12-03T18:55:12,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44293 2024-12-03T18:55:12,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44293 2024-12-03T18:55:12,902 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44293 2024-12-03T18:55:12,902 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44293 2024-12-03T18:55:12,904 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(746): ClusterId : 7eae729e-84d3-46b0-b5d0-e27bc66eedf5 2024-12-03T18:55:12,904 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T18:55:12,916 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T18:55:12,916 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T18:55:12,926 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T18:55:12,927 DEBUG [RS:1;db5a5ccf5be8:44293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47d8087b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:55:12,936 DEBUG [RS:1;db5a5ccf5be8:44293 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;db5a5ccf5be8:44293 2024-12-03T18:55:12,937 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T18:55:12,937 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T18:55:12,937 DEBUG [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T18:55:12,938 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(2659): reportForDuty to master=db5a5ccf5be8,39725,1733252111031 with port=44293, startcode=1733252112865 2024-12-03T18:55:12,938 DEBUG [RS:1;db5a5ccf5be8:44293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T18:55:12,939 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40691, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T18:55:12,940 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39725 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db5a5ccf5be8,44293,1733252112865 2024-12-03T18:55:12,940 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39725 {}] master.ServerManager(517): Registering regionserver=db5a5ccf5be8,44293,1733252112865 2024-12-03T18:55:12,942 DEBUG [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6 2024-12-03T18:55:12,942 DEBUG [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37681 2024-12-03T18:55:12,942 DEBUG [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T18:55:12,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:55:12,947 DEBUG [RS:1;db5a5ccf5be8:44293 {}] zookeeper.ZKUtil(111): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db5a5ccf5be8,44293,1733252112865 2024-12-03T18:55:12,947 WARN [RS:1;db5a5ccf5be8:44293 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:55:12,947 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db5a5ccf5be8,44293,1733252112865] 2024-12-03T18:55:12,947 INFO [RS:1;db5a5ccf5be8:44293 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:55:12,947 DEBUG [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865 2024-12-03T18:55:12,952 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T18:55:12,955 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T18:55:12,955 INFO [RS:1;db5a5ccf5be8:44293 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:55:12,955 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T18:55:12,956 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T18:55:12,957 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T18:55:12,957 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,957 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,957 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,957 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:55:12,958 DEBUG [RS:1;db5a5ccf5be8:44293 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:55:12,959 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T18:55:12,959 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,959 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,959 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,959 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,959 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,44293,1733252112865-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:55:12,973 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T18:55:12,973 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,44293,1733252112865-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,973 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,973 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.Replication(171): db5a5ccf5be8,44293,1733252112865 started 2024-12-03T18:55:12,983 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:55:12,983 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(1482): Serving as db5a5ccf5be8,44293,1733252112865, RpcServer on db5a5ccf5be8/172.17.0.2:44293, sessionid=0x1019c8c56530002 2024-12-03T18:55:12,984 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T18:55:12,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;db5a5ccf5be8:44293,5,FailOnTimeoutGroup] 2024-12-03T18:55:12,984 DEBUG [RS:1;db5a5ccf5be8:44293 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db5a5ccf5be8,44293,1733252112865 2024-12-03T18:55:12,984 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,44293,1733252112865' 2024-12-03T18:55:12,984 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T18:55:12,984 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-03T18:55:12,984 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T18:55:12,984 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T18:55:12,985 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T18:55:12,985 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T18:55:12,985 DEBUG [RS:1;db5a5ccf5be8:44293 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
db5a5ccf5be8,44293,1733252112865 2024-12-03T18:55:12,985 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,44293,1733252112865' 2024-12-03T18:55:12,985 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T18:55:12,986 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:12,986 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T18:55:12,986 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@604ab482 2024-12-03T18:55:12,986 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T18:55:12,986 DEBUG [RS:1;db5a5ccf5be8:44293 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T18:55:12,986 INFO [RS:1;db5a5ccf5be8:44293 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T18:55:12,986 INFO [RS:1;db5a5ccf5be8:44293 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T18:55:12,988 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T18:55:12,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39725 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T18:55:12,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39725 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
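The two TableDescriptorChecker warnings above flag the deliberately tiny limits this test runs with (max file size 786432 bytes, memstore flush size 8192 bytes) to provoke frequent flushes and splits. A hedged sketch of building a descriptor with those limits through the public client API; in the test the values may equally come from "hbase.hregion.max.filesize" and "hbase.hregion.memstore.flush.size" in the configuration rather than the descriptor:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallLimitsTableSketch {
      static void createTable(Admin admin) throws java.io.IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432L)       // the MAX_FILESIZE value the checker flags
            .setMemStoreFlushSize(8192L)   // the MEMSTORE_FLUSHSIZE value the checker flags
            .build();
        admin.createTable(desc);
      }
    }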
2024-12-03T18:55:12,988 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39725 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:55:12,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39725 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T18:55:12,991 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T18:55:12,991 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:12,992 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39725 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-03T18:55:12,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39725 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:55:12,993 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T18:55:13,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741835_1011 (size=393) 2024-12-03T18:55:13,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741835_1011 (size=393) 2024-12-03T18:55:13,002 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ffcdd949a24b91f4996ce458c5691d39, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6 2024-12-03T18:55:13,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741836_1012 (size=76) 2024-12-03T18:55:13,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741836_1012 (size=76) 2024-12-03T18:55:13,009 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:13,010 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing ffcdd949a24b91f4996ce458c5691d39, disabling compactions & flushes 2024-12-03T18:55:13,010 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:13,010 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:13,010 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. after waiting 0 ms 2024-12-03T18:55:13,010 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:13,010 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:13,010 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for ffcdd949a24b91f4996ce458c5691d39: Waiting for close lock at 1733252113009Disabling compacts and flushes for region at 1733252113009Disabling writes for close at 1733252113010 (+1 ms)Writing region close event to WAL at 1733252113010Closed at 1733252113010 2024-12-03T18:55:13,012 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T18:55:13,012 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733252113012"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733252113012"}]},"ts":"1733252113012"} 2024-12-03T18:55:13,015 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T18:55:13,017 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T18:55:13,017 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252113017"}]},"ts":"1733252113017"} 2024-12-03T18:55:13,019 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-03T18:55:13,020 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffcdd949a24b91f4996ce458c5691d39, ASSIGN}] 2024-12-03T18:55:13,021 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffcdd949a24b91f4996ce458c5691d39, ASSIGN 2024-12-03T18:55:13,022 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffcdd949a24b91f4996ce458c5691d39, ASSIGN; state=OFFLINE, location=db5a5ccf5be8,46359,1733252111199; forceNewPlan=false, retain=false 2024-12-03T18:55:13,090 INFO [RS:1;db5a5ccf5be8:44293 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C44293%2C1733252112865, suffix=, logDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865, archiveDir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs, maxLogs=32 2024-12-03T18:55:13,092 INFO [RS:1;db5a5ccf5be8:44293 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 2024-12-03T18:55:13,100 INFO [RS:1;db5a5ccf5be8:44293 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 2024-12-03T18:55:13,101 DEBUG [RS:1;db5a5ccf5be8:44293 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34495:34495),(127.0.0.1/127.0.0.1:38647:38647)] 2024-12-03T18:55:13,173 INFO [db5a5ccf5be8:39725 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
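The AbstractFSWAL entry above reports blocksize=256 MB and rollsize=128 MB for the new region server's WAL. Roll size is normally the WAL block size scaled by hbase.regionserver.logroll.multiplier (0.5 by default), which matches the logged pair; a minimal sketch of that arithmetic, assuming the stock configuration key:

    import org.apache.hadoop.conf.Configuration;

    public class WalRollSizeSketch {
      // Roll size = WAL block size * logroll multiplier.
      static long rollSize(Configuration conf, long walBlockSizeBytes) {
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        return (long) (walBlockSizeBytes * multiplier);
      }

      public static void main(String[] args) {
        long blockSize = 256L * 1024 * 1024;                          // blocksize=256 MB from the log
        System.out.println(rollSize(new Configuration(), blockSize)); // 134217728 bytes = 128 MB
      }
    }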
2024-12-03T18:55:13,174 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ffcdd949a24b91f4996ce458c5691d39, regionState=OPENING, regionLocation=db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:13,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffcdd949a24b91f4996ce458c5691d39, ASSIGN because future has completed 2024-12-03T18:55:13,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ffcdd949a24b91f4996ce458c5691d39, server=db5a5ccf5be8,46359,1733252111199}] 2024-12-03T18:55:13,337 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:13,337 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ffcdd949a24b91f4996ce458c5691d39, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:55:13,338 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,338 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:55:13,338 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,338 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,340 INFO [StoreOpener-ffcdd949a24b91f4996ce458c5691d39-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,341 INFO [StoreOpener-ffcdd949a24b91f4996ce458c5691d39-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ffcdd949a24b91f4996ce458c5691d39 columnFamilyName info 2024-12-03T18:55:13,341 DEBUG [StoreOpener-ffcdd949a24b91f4996ce458c5691d39-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:55:13,342 INFO [StoreOpener-ffcdd949a24b91f4996ce458c5691d39-1 {}] regionserver.HStore(327): Store=ffcdd949a24b91f4996ce458c5691d39/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:55:13,342 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,343 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,344 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,344 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,344 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,346 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,349 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:55:13,349 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ffcdd949a24b91f4996ce458c5691d39; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772738, jitterRate=-0.017413198947906494}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T18:55:13,349 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:13,350 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ffcdd949a24b91f4996ce458c5691d39: Running coprocessor pre-open hook at 1733252113338Writing region info on filesystem at 1733252113338Initializing all the Stores at 1733252113339 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252113339Cleaning up temporary data from old regions at 1733252113344 (+5 ms)Running coprocessor post-open hooks at 1733252113349 (+5 ms)Region opened successfully at 1733252113350 (+1 ms) 2024-12-03T18:55:13,351 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39., pid=6, masterSystemTime=1733252113333 2024-12-03T18:55:13,354 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:13,354 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:13,355 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ffcdd949a24b91f4996ce458c5691d39, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:13,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ffcdd949a24b91f4996ce458c5691d39, server=db5a5ccf5be8,46359,1733252111199 because future has completed 2024-12-03T18:55:13,364 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T18:55:13,364 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ffcdd949a24b91f4996ce458c5691d39, server=db5a5ccf5be8,46359,1733252111199 in 183 msec 2024-12-03T18:55:13,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T18:55:13,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=ffcdd949a24b91f4996ce458c5691d39, ASSIGN in 345 msec 2024-12-03T18:55:13,370 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T18:55:13,370 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252113370"}]},"ts":"1733252113370"} 2024-12-03T18:55:13,373 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-03T18:55:13,374 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T18:55:13,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 386 msec 2024-12-03T18:55:18,181 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:55:18,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:18,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:18,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:18,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:18,214 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T18:55:18,214 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T18:55:18,214 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T18:55:18,214 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-03T18:55:18,215 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:55:18,215 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T18:55:18,215 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-03T18:55:23,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39725 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:55:23,092 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-03T18:55:23,092 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-03T18:55:23,100 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T18:55:23,100 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:23,113 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:23,116 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:23,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:23,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:23,117 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:55:23,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30008f24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:23,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ab5b96c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:23,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fd17220{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/java.io.tmpdir/jetty-localhost-44703-hadoop-hdfs-3_4_1-tests_jar-_-any-12377854392739769307/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:23,210 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c68f920{HTTP/1.1, (http/1.1)}{localhost:44703} 2024-12-03T18:55:23,210 INFO [Time-limited test {}] server.Server(415): Started @121086ms 2024-12-03T18:55:23,211 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:23,242 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:23,244 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:23,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:23,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:23,246 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:55:23,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f85c2b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:23,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64b89ed5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:23,334 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d82aaea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/java.io.tmpdir/jetty-localhost-35979-hadoop-hdfs-3_4_1-tests_jar-_-any-13276217043222396523/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:23,335 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3904e150{HTTP/1.1, (http/1.1)}{localhost:35979} 2024-12-03T18:55:23,335 INFO [Time-limited test {}] server.Server(415): Started @121212ms 2024-12-03T18:55:23,336 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:23,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:23,366 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:23,367 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:23,367 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:23,367 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:55:23,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b21f544{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:23,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47432b7b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:23,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20faceaa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/java.io.tmpdir/jetty-localhost-37213-hadoop-hdfs-3_4_1-tests_jar-_-any-8676001970384377776/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:23,458 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e1ad43e{HTTP/1.1, (http/1.1)}{localhost:37213} 2024-12-03T18:55:23,458 INFO [Time-limited test {}] server.Server(415): Started @121334ms 2024-12-03T18:55:23,459 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:24,619 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data5/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:24,619 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data6/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:24,639 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:55:24,641 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58c66daf2165b8cf with lease ID 0xe29c90a44304063c: Processing first storage report for DS-415cd1d5-a105-4c92-b8f8-993dcb323415 from datanode DatanodeRegistration(127.0.0.1:38523, datanodeUuid=749c500c-5395-4d83-978d-d9c295c6feba, infoPort=44091, infoSecurePort=0, ipcPort=35025, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:24,641 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58c66daf2165b8cf with lease ID 0xe29c90a44304063c: from storage DS-415cd1d5-a105-4c92-b8f8-993dcb323415 node DatanodeRegistration(127.0.0.1:38523, datanodeUuid=749c500c-5395-4d83-978d-d9c295c6feba, infoPort=44091, infoSecurePort=0, ipcPort=35025, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:24,642 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58c66daf2165b8cf with lease ID 0xe29c90a44304063c: Processing first storage report for DS-1b566393-9fba-43d7-b94e-73fb50597823 from datanode DatanodeRegistration(127.0.0.1:38523, datanodeUuid=749c500c-5395-4d83-978d-d9c295c6feba, infoPort=44091, infoSecurePort=0, ipcPort=35025, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:24,642 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58c66daf2165b8cf with lease ID 0xe29c90a44304063c: from storage DS-1b566393-9fba-43d7-b94e-73fb50597823 node DatanodeRegistration(127.0.0.1:38523, datanodeUuid=749c500c-5395-4d83-978d-d9c295c6feba, infoPort=44091, infoSecurePort=0, ipcPort=35025, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:24,759 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data8/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:24,759 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data7/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:24,782 WARN [Thread-830 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:55:24,784 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66b2b9825b954a79 with lease ID 0xe29c90a44304063d: Processing first storage report for DS-65653d36-c9d6-40de-810f-8f14689aee01 from datanode DatanodeRegistration(127.0.0.1:44945, datanodeUuid=e833ae0c-c540-41ef-8717-16fc4b94b63f, infoPort=40425, infoSecurePort=0, ipcPort=40041, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:24,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66b2b9825b954a79 with lease ID 0xe29c90a44304063d: from storage DS-65653d36-c9d6-40de-810f-8f14689aee01 node DatanodeRegistration(127.0.0.1:44945, datanodeUuid=e833ae0c-c540-41ef-8717-16fc4b94b63f, infoPort=40425, infoSecurePort=0, ipcPort=40041, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T18:55:24,784 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66b2b9825b954a79 with lease ID 0xe29c90a44304063d: Processing first storage report for DS-1d32f6fd-8555-4e5c-94b1-fa738fae4927 from datanode DatanodeRegistration(127.0.0.1:44945, datanodeUuid=e833ae0c-c540-41ef-8717-16fc4b94b63f, infoPort=40425, infoSecurePort=0, ipcPort=40041, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:24,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66b2b9825b954a79 with lease ID 0xe29c90a44304063d: from storage DS-1d32f6fd-8555-4e5c-94b1-fa738fae4927 node DatanodeRegistration(127.0.0.1:44945, datanodeUuid=e833ae0c-c540-41ef-8717-16fc4b94b63f, infoPort=40425, infoSecurePort=0, ipcPort=40041, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:24,900 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:24,900 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10/current/BP-619597313-172.17.0.2-1733252108905/current, will proceed with Du for space computation calculation, 2024-12-03T18:55:24,916 WARN [Thread-852 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:55:24,918 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe92d38156e759f09 with lease ID 0xe29c90a44304063e: Processing first storage report for DS-49a6f974-45fa-4a38-a4e1-bea744d45f50 from datanode DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:24,918 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe92d38156e759f09 with lease ID 0xe29c90a44304063e: from storage DS-49a6f974-45fa-4a38-a4e1-bea744d45f50 node DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:24,918 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe92d38156e759f09 with lease ID 0xe29c90a44304063e: Processing first storage report for DS-76c542d7-1982-4401-854c-61b4940fad25 from datanode DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905) 2024-12-03T18:55:24,918 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe92d38156e759f09 with lease ID 0xe29c90a44304063e: from storage DS-76c542d7-1982-4401-854c-61b4940fad25 node DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:25,000 WARN [ResponseProcessor for block BP-619597313-172.17.0.2-1733252108905:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-619597313-172.17.0.2-1733252108905:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,000 WARN [ResponseProcessor for block BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
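Stepping back to the table-creation entries earlier in this stretch (pid=4 CreateTableProcedure for TestLogRolling-testLogRollOnDatanodeDeath finishing in 386 msec, with a single 'info' column family): as an illustrative aside only, not the test's actual code, a table of that shape can be created through the HBase 2.x Admin API roughly as below. The class name and the hard-coded descriptor values are assumptions for the sketch, taken from the descriptor printed in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper, not part of the test: creates a table shaped like the one logged above.
public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .setBlocksize(65536) // BLOCKSIZE => '65536'
              .build())
          .build());
      // createTable() blocks until the master's CreateTableProcedure completes,
      // which is what the "Operation: CREATE ... completed" client entry reflects.
    }
  }
}
```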
2024-12-03T18:55:25,000 WARN [ResponseProcessor for block BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,000 WARN [ResponseProcessor for block BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,001 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 block BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:25,001 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 block BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:25,002 WARN [PacketResponder: BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40429] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,002 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta block BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:25,001 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 block BP-619597313-172.17.0.2-1733252108905:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:25,002 WARN [PacketResponder: BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40429] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2108850015_22 at /127.0.0.1:46656 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36129:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46656 dst: /127.0.0.1:36129 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:57628 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57628 dst: /127.0.0.1:40429 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:46626 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36129:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46626 dst: /127.0.0.1:36129 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:25,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:46612 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36129:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46612 dst: /127.0.0.1:36129 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,005 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:42666 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42666 dst: /127.0.0.1:40429 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:53928 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36129:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53928 dst: /127.0.0.1:36129 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2108850015_22 at /127.0.0.1:42712 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42712 dst: /127.0.0.1:40429 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@abbe752{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:25,007 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42443481{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:25,007 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:25,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:42676 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42676 dst: /127.0.0.1:40429 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:25,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e10767c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:25,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c64d82b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:25,009 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:25,009 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
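The surrounding entries (Jetty datanode contexts stopped, "IncrementalBlockReportManager interrupted", "Ending block pool service", "Ending command processor service") are the normal shutdown chatter of a datanode being stopped, which is how a test of this kind typically simulates a datanode death. As a hedged illustration only, assuming a MiniDFSCluster from the hadoop-hdfs test jar (the cluster construction and the index 0 are assumptions, not taken from this log), stopping and later restarting a datanode looks roughly like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative only: how a test might kill a datanode in a MiniDFSCluster.
public class DatanodeDeathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    // Stop one datanode; open write pipelines that include it begin error recovery,
    // producing warnings like the ResponseProcessor / DataXceiver entries above.
    MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(0);

    // ... exercise WAL writes against the degraded cluster here ...

    // Optionally bring the same datanode back on the same port.
    cluster.restartDataNode(stopped, true);
    cluster.shutdown();
  }
}
```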
2024-12-03T18:55:25,009 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619597313-172.17.0.2-1733252108905 (Datanode Uuid ff2ed10a-1a62-4254-b7ab-13ac5358207a) service to localhost/127.0.0.1:37681 2024-12-03T18:55:25,009 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:25,010 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data3/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:25,010 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data4/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:25,010 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:25,011 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 block BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,011 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 block BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,011 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta block BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,011 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 block BP-619597313-172.17.0.2-1733252108905:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a2936a3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:25,013 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a4bf55{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:25,013 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:25,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4743e1dd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:25,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a18c5e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:25,014 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:25,014 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:55:25,014 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619597313-172.17.0.2-1733252108905 (Datanode Uuid a59547c2-5240-4f9b-b61e-f7cc2e5e9d66) service to localhost/127.0.0.1:37681 2024-12-03T18:55:25,014 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:25,014 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data1/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:25,015 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data2/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:25,015 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:25,018 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39., hostname=db5a5ccf5be8,46359,1733252111199, seqNum=2] 2024-12-03T18:55:25,019 ERROR [FSHLog-0-hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6-prefix:db5a5ccf5be8,46359,1733252111199 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,020 WARN [FSHLog-0-hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6-prefix:db5a5ccf5be8,46359,1733252111199 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,020 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C46359%2C1733252111199:(num 1733252112239) roll requested 2024-12-03T18:55:25,020 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C46359%2C1733252111199.1733252125020 2024-12-03T18:55:25,026 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:25,026 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:25,026 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:25,026 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:25,026 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:25,027 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252125020 2024-12-03T18:55:25,027 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:25,027 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
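The "All datanodes [...] are bad. Aborting..." errors above come from the DFS client exhausting its write-pipeline recovery options after the datanode shutdowns, which in turn triggers the WAL roll recorded by AbstractWALRoller. Whether the client aborts or tries to swap in a replacement datanode is governed by the replace-datanode-on-failure client settings; a hedged example of the relevant Hadoop configuration keys follows (the values shown are illustrative, not the configuration this test run uses):

```java
import org.apache.hadoop.conf.Configuration;

// Illustrative only: client-side keys that control HDFS write-pipeline recovery.
public class PipelineRecoveryConf {
  public static Configuration sketch() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT / ALWAYS / NEVER: when to ask the namenode for a replacement datanode.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Keep writing even if no replacement can be found, instead of aborting the stream.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```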
2024-12-03T18:55:25,028 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-03T18:55:25,029 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-03T18:55:25,029 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 2024-12-03T18:55:25,030 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44091:44091),(127.0.0.1/127.0.0.1:40425:40425)] 2024-12-03T18:55:25,030 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 is not closed yet, will try archiving it next time 2024-12-03T18:55:25,031 WARN [IPC Server handler 2 on default port 37681 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-03T18:55:25,034 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 after 4ms 2024-12-03T18:55:25,304 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:26,960 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
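The `RecoverLeaseFSUtils` entries above show the close-WAL thread asking the NameNode to recover the lease on the previous WAL file and backing off while the NameNode reports "Lease recovery is in progress" (attempt=0 after 4ms here, attempt=1 roughly four seconds later further down). As a rough illustration of the underlying HDFS client call only, not HBase's actual utility, a retry loop might look like the sketch below; the path is a placeholder:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder WAL path; the real one in the log is specific to this test run.
    Path oldWal = new Path("hdfs://localhost:37681/user/jenkins/example/old-wal");
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(oldWal.toUri(), conf);
    boolean recovered = false;
    for (int attempt = 0; attempt < 5 && !recovered; attempt++) {
      // recoverLease() returns true once the file is closed and its lease released;
      // false means the NameNode is still recovering the file's last block.
      recovered = dfs.recoverLease(oldWal);
      if (!recovered) {
        Thread.sleep(4000L); // back off, mirroring the ~4 s gap between attempts in the log
      }
    }
    System.out.println("lease recovered: " + recovered);
  }
}
```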
2024-12-03T18:55:27,030 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:27,031 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252125020 2024-12-03T18:55:27,033 WARN [ResponseProcessor for block BP-619597313-172.17.0.2-1733252108905:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-619597313-172.17.0.2-1733252108905:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:27,034 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252125020 block BP-619597313-172.17.0.2-1733252108905:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:27,035 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:51730 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38523:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51730 dst: /127.0.0.1:38523 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:27,036 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:43248 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43248 dst: /127.0.0.1:44945 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
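The `ClosedChannelException` / `Premature EOF` pair above, and the Jetty and block-pool shutdown messages that follow, are the expected noise when the test kills datanodes that still hold open write pipelines. In a Hadoop test harness that is typically done against a `MiniDFSCluster`; a small self-contained sketch of that pattern (hypothetical setup, not this test's actual fixture) might be:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DatanodeDeathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)   // small cluster, enough to form a write pipeline
        .build();
    try {
      cluster.waitActive();
      // Stop one datanode while clients may still be writing through it; surviving
      // writers then see pipeline errors like the ones logged above and must recover.
      cluster.stopDataNode(0);
    } finally {
      cluster.shutdown();
    }
  }
}
```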
2024-12-03T18:55:27,064 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fd17220{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:27,064 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c68f920{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:27,064 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:27,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ab5b96c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:27,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30008f24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:27,067 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T18:55:27,067 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:27,067 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619597313-172.17.0.2-1733252108905 (Datanode Uuid 749c500c-5395-4d83-978d-d9c295c6feba) service to localhost/127.0.0.1:37681 2024-12-03T18:55:27,067 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:27,068 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data5/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:27,068 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data6/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:27,068 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:27,305 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:28,960 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:29,031 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:29,032 WARN [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]] 2024-12-03T18:55:29,032 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C46359%2C1733252111199:(num 1733252125020) roll requested 2024-12-03T18:55:29,032 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C46359%2C1733252111199.1733252129032 2024-12-03T18:55:29,036 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 after 4007ms 2024-12-03T18:55:29,039 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:29,039 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54284 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741839_1021 to mirror 127.0.0.1:36129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:29,040 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:29,040 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54284 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T18:55:29,040 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741839_1021 2024-12-03T18:55:29,040 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54284 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54284 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:29,042 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:29,045 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:29,045 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:29,045 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741840_1022 2024-12-03T18:55:29,046 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:29,048 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40429 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:29,048 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:43254 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data8]'}, localName='127.0.0.1:44945', datanodeUuid='e833ae0c-c540-41ef-8717-16fc4b94b63f', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741841_1023 to mirror 127.0.0.1:40429 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:29,048 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:29,048 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741841_1023 2024-12-03T18:55:29,048 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:43254 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T18:55:29,049 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:43254 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:44945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43254 dst: /127.0.0.1:44945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:29,049 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:29,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:29,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:29,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:29,054 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:29,054 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:29,054 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252125020 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252129032 2024-12-03T18:55:29,055 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40425:40425),(127.0.0.1/127.0.0.1:44371:44371)] 2024-12-03T18:55:29,055 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 is not closed yet, will try archiving it next time 2024-12-03T18:55:29,055 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252125020 is not closed yet, will try archiving it next time 2024-12-03T18:55:29,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44945 is added to blk_1073741838_1020 (size=2431) 2024-12-03T18:55:29,074 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T18:55:29,306 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:29,458 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 is not closed yet, will try archiving it next time 2024-12-03T18:55:30,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741838_1020 (size=2431) 2024-12-03T18:55:30,961 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:31,055 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:31,077 WARN [ResponseProcessor for block BP-619597313-172.17.0.2-1733252108905:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-619597313-172.17.0.2-1733252108905:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:31,078 WARN [DataStreamer for file /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252129032 block BP-619597313-172.17.0.2-1733252108905:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:31,079 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:43256 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43256 dst: /127.0.0.1:44945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:31,080 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54296 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54296 dst: /127.0.0.1:33593 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:31,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d82aaea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:31,082 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3904e150{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:31,082 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:31,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64b89ed5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:31,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f85c2b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:31,084 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:31,084 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619597313-172.17.0.2-1733252108905 (Datanode Uuid e833ae0c-c540-41ef-8717-16fc4b94b63f) service to localhost/127.0.0.1:37681 2024-12-03T18:55:31,085 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data7/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:31,085 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data8/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:31,085 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T18:55:31,085 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:31,085 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:31,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46359 {}] regionserver.HRegion(8855): Flush requested on ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:31,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffcdd949a24b91f4996ce458c5691d39 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:55:31,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/e99a68311ede435486014355f6dd200c is 1080, key is row0002/info:/1733252127070/Put/seqid=0 2024-12-03T18:55:31,111 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:31,111 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 
2024-12-03T18:55:31,111 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741843_1026 2024-12-03T18:55:31,112 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:31,113 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:31,113 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:31,113 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741844_1027 2024-12-03T18:55:31,113 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:31,115 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:31,115 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54326 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741845_1028 to mirror 127.0.0.1:36129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:31,115 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:31,115 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54326 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:31,115 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741845_1028 2024-12-03T18:55:31,116 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54326 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54326 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:31,116 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:31,118 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:31,118 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 
2024-12-03T18:55:31,118 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741846_1029 2024-12-03T18:55:31,119 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:31,119 WARN [IPC Server handler 2 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:31,119 WARN [IPC Server handler 2 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:31,120 WARN [IPC Server handler 2 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:31,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741847_1030 (size=10347) 2024-12-03T18:55:31,306 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
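The `BlockPlacementPolicyDefault` warnings above mean the NameNode can no longer find enough live datanodes to satisfy replication=2 for new blocks, so write pipelines keep shrinking. In small test or single-rack deployments this is often tolerated by relaxing the client's datanode-replacement behaviour; a hedged configuration sketch using standard HDFS client keys, with values chosen purely for illustration:

```java
import org.apache.hadoop.conf.Configuration;

public class PipelinePolicyConfigSketch {
  public static Configuration relaxedPipelineConf() {
    Configuration conf = new Configuration();
    // Target replication for new files (the log above shows replication=2).
    conf.setInt("dfs.replication", 2);
    // Never try to add a replacement datanode to a failing pipeline...
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    // ...and if a replacement is ever attempted and fails, keep writing with the
    // datanodes that are left rather than aborting the stream.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```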
2024-12-03T18:55:31,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/e99a68311ede435486014355f6dd200c 2024-12-03T18:55:31,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/e99a68311ede435486014355f6dd200c as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e99a68311ede435486014355f6dd200c 2024-12-03T18:55:31,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e99a68311ede435486014355f6dd200c, entries=5, sequenceid=11, filesize=10.1 K 2024-12-03T18:55:31,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for ffcdd949a24b91f4996ce458c5691d39 in 452ms, sequenceid=11, compaction requested=false 2024-12-03T18:55:31,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46359 {}] regionserver.HRegion(8855): Flush requested on ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:31,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffcdd949a24b91f4996ce458c5691d39 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-03T18:55:31,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/4033dc9a7bae4fb9956f9c39e55e8645 is 1080, key is row0007/info:/1733252131094/Put/seqid=0 2024-12-03T18:55:31,736 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
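The flush sequence above (memstore written to a `.tmp` HFile, committed under `info/`, then "Finished flush ... compaction requested=false") is triggered here by write pressure on the test table. From the client side the same mechanism can be exercised by writing a few rows and requesting an explicit flush; a minimal sketch against the table name used in this test, with connection details and row/value contents assumed:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Rows follow the row0002/row0007 pattern visible in the log; the "info"
      // column family matches the HFile path committed above.
      for (int i = 0; i < 5; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        table.put(put);
      }
      // Force the memstore to be written out as an HFile, as in the flush logged above.
      admin.flush(tn);
    }
  }
}
```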
2024-12-03T18:55:31,736 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:31,736 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741848_1031 2024-12-03T18:55:31,737 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:31,738 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:31,738 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:31,738 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741849_1032 2024-12-03T18:55:31,739 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:31,740 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:31,741 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:31,741 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741850_1033 2024-12-03T18:55:31,741 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:31,744 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:31,744 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54352 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741851_1034 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:31,744 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:31,744 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741851_1034 2024-12-03T18:55:31,744 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54352 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:31,744 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:54352 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54352 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:31,745 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:31,745 WARN [IPC Server handler 2 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:31,745 WARN [IPC Server handler 2 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:31,746 WARN [IPC Server handler 2 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:31,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741852_1035 (size=12506) 2024-12-03T18:55:32,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/4033dc9a7bae4fb9956f9c39e55e8645 2024-12-03T18:55:32,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/4033dc9a7bae4fb9956f9c39e55e8645 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4033dc9a7bae4fb9956f9c39e55e8645 2024-12-03T18:55:32,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4033dc9a7bae4fb9956f9c39e55e8645, entries=7, sequenceid=24, filesize=12.2 K 2024-12-03T18:55:32,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for ffcdd949a24b91f4996ce458c5691d39 in 441ms, sequenceid=24, compaction requested=false 2024-12-03T18:55:32,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:32,165 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-03T18:55:32,166 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:32,166 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4033dc9a7bae4fb9956f9c39e55e8645 because midkey is the same as first or last row 2024-12-03T18:55:32,961 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,056 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,056 WARN [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]] 2024-12-03T18:55:33,056 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C46359%2C1733252111199:(num 1733252129032) roll requested 2024-12-03T18:55:33,057 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C46359%2C1733252111199.1733252133056 2024-12-03T18:55:33,061 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,061 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36352 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741853_1036 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:33,061 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:33,061 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741853_1036 2024-12-03T18:55:33,061 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36352 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T18:55:33,061 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36352 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36352 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:33,062 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:33,063 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,064 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:33,064 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741854_1037 2024-12-03T18:55:33,064 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:33,066 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,066 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:33,066 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741855_1038 2024-12-03T18:55:33,067 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:33,068 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,068 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 
2024-12-03T18:55:33,068 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741856_1039 2024-12-03T18:55:33,069 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:33,070 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:33,070 WARN [IPC Server handler 1 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:33,070 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:33,073 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:33,073 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:33,073 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:33,073 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:33,074 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:33,074 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252129032 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252133056 2024-12-03T18:55:33,075 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44371:44371)] 2024-12-03T18:55:33,075 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 is not closed yet, will try archiving it next time 2024-12-03T18:55:33,075 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252129032 is not closed yet, will try archiving it next time 2024-12-03T18:55:33,075 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252125020 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs/db5a5ccf5be8%2C46359%2C1733252111199.1733252125020 2024-12-03T18:55:33,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741842_1025 (size=25992) 2024-12-03T18:55:33,077 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 is not closed yet, will try archiving it next time 2024-12-03T18:55:33,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46359 {}] regionserver.HRegion(8855): Flush requested on ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:33,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffcdd949a24b91f4996ce458c5691d39 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T18:55:33,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/868ad96ca8934859a6e00e6407b83e91 is 1079, key is tmprow/info:/1733252133159/Put/seqid=0 2024-12-03T18:55:33,173 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,173 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 
2024-12-03T18:55:33,173 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741858_1041 2024-12-03T18:55:33,174 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:33,177 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40429 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,177 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36368 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741859_1042 to mirror 127.0.0.1:40429 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:33,177 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 
2024-12-03T18:55:33,177 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741859_1042 2024-12-03T18:55:33,177 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36368 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:33,177 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36368 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36368 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:33,178 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:33,179 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,180 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 
2024-12-03T18:55:33,180 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741860_1043 2024-12-03T18:55:33,180 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:33,182 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,182 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:33,182 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741861_1044 2024-12-03T18:55:33,183 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:33,184 WARN [IPC Server handler 2 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:33,184 WARN [IPC Server handler 2 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:33,184 WARN [IPC Server handler 2 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:33,187 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741862_1045 (size=6027) 2024-12-03T18:55:33,307 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/868ad96ca8934859a6e00e6407b83e91 2024-12-03T18:55:33,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/868ad96ca8934859a6e00e6407b83e91 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/868ad96ca8934859a6e00e6407b83e91 2024-12-03T18:55:33,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/868ad96ca8934859a6e00e6407b83e91, entries=1, sequenceid=34, filesize=5.9 K 2024-12-03T18:55:33,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ffcdd949a24b91f4996ce458c5691d39 in 449ms, sequenceid=34, compaction requested=true 2024-12-03T18:55:33,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:33,610 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-03T18:55:33,610 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:33,610 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4033dc9a7bae4fb9956f9c39e55e8645 because midkey is the same as first or last row 2024-12-03T18:55:33,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ffcdd949a24b91f4996ce458c5691d39:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:55:33,611 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:55:33,611 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:55:33,613 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:55:33,613 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1541): ffcdd949a24b91f4996ce458c5691d39/info is initiating minor compaction (all files) 2024-12-03T18:55:33,613 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ffcdd949a24b91f4996ce458c5691d39/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:33,613 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e99a68311ede435486014355f6dd200c, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4033dc9a7bae4fb9956f9c39e55e8645, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/868ad96ca8934859a6e00e6407b83e91] into tmpdir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp, totalSize=28.2 K 2024-12-03T18:55:33,614 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting e99a68311ede435486014355f6dd200c, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733252127070 2024-12-03T18:55:33,614 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4033dc9a7bae4fb9956f9c39e55e8645, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733252131094 2024-12-03T18:55:33,615 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting 868ad96ca8934859a6e00e6407b83e91, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733252133159 2024-12-03T18:55:33,629 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ffcdd949a24b91f4996ce458c5691d39#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:55:33,629 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/e5666c7d62914286ab14a862b04d02d7 is 1080, key is row0002/info:/1733252127070/Put/seqid=0 2024-12-03T18:55:33,631 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,631 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:33,631 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741863_1046 2024-12-03T18:55:33,631 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:33,633 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:33,633 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:33,633 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741864_1047 2024-12-03T18:55:33,633 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:33,635 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:33,635 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36416 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741865_1048 to mirror 127.0.0.1:36129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:33,635 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:33,635 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741865_1048 2024-12-03T18:55:33,635 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36416 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:33,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36416 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36416 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:33,636 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:33,638 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:33,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36418 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741866_1049 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:33,638 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:33,638 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741866_1049 2024-12-03T18:55:33,638 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36418 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:33,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36418 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36418 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:33,639 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:33,640 WARN [IPC Server handler 0 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:33,640 WARN [IPC Server handler 0 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:33,640 WARN [IPC Server handler 0 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:33,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741867_1050 (size=17994) 2024-12-03T18:55:33,922 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ef50e6e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741847_1030 to 127.0.0.1:38523 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:33,922 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@aca8b68[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741852_1035 to 127.0.0.1:36129 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:34,053 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/e5666c7d62914286ab14a862b04d02d7 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7 2024-12-03T18:55:34,060 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ffcdd949a24b91f4996ce458c5691d39/info of ffcdd949a24b91f4996ce458c5691d39 into e5666c7d62914286ab14a862b04d02d7(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:34,061 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39., storeName=ffcdd949a24b91f4996ce458c5691d39/info, priority=13, startTime=1733252133610; duration=0sec 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7 because midkey is the same as first or last row 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7 because midkey is the same as first or last row 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7 because midkey is the same as first or last row 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:55:34,061 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ffcdd949a24b91f4996ce458c5691d39:info 2024-12-03T18:55:34,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46359 {}] regionserver.HRegion(8855): Flush requested on ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:34,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffcdd949a24b91f4996ce458c5691d39 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T18:55:34,594 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/36bbcc6c3bb64f4aa817826c86b5c400 is 1079, key is tmprow/info:/1733252134588/Put/seqid=0 2024-12-03T18:55:34,596 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:34,596 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:34,596 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741868_1051 2024-12-03T18:55:34,597 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:34,600 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44945 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:34,600 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 
2024-12-03T18:55:34,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36428 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741869_1052 to mirror 127.0.0.1:44945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:34,600 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741869_1052 2024-12-03T18:55:34,600 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36428 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:34,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36428 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36428 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:34,601 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:34,603 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:34,603 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36432 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741870_1053 to mirror 127.0.0.1:36129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:34,603 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 
2024-12-03T18:55:34,604 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36432 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:34,604 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741870_1053 2024-12-03T18:55:34,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36432 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36432 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:34,604 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:34,606 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:34,606 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 
2024-12-03T18:55:34,606 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741871_1054 2024-12-03T18:55:34,606 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:34,607 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:34,607 WARN [IPC Server handler 1 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:34,607 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:34,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741872_1055 (size=6027) 2024-12-03T18:55:34,922 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ef50e6e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741862_1045 to 127.0.0.1:38523 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
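The BlockPlacementPolicyDefault warnings above mean the NameNode could not find a second usable DISK storage for the new block, which is consistent with most datanodes in this mini-cluster being down. One way to confirm how many datanodes the NameNode still considers live is to ask it directly; the sketch below is an assumed standalone check against the NameNode port 37681 seen in the log, not part of this test.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

// Assumed helper: lists the datanodes the NameNode currently reports as live/dead.
// The hdfs://localhost:37681 URI comes from the log; adjust for a real cluster.
public class LiveDatanodeCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37681"), conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
            DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
            System.out.println("live datanodes: " + live.length + ", dead: " + dead.length);
            for (DatanodeInfo dn : live) {
                System.out.println("  live: " + dn.getXferAddr());
            }
        }
    }
}
```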
2024-12-03T18:55:34,922 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@aca8b68[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741842_1025 to 127.0.0.1:36129 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:34,962 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
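The "All datanodes [...] are bad. Aborting..." failure above is the client's pipeline recovery running out of replacement nodes. For reference, this behaviour is governed by the dfs.client.block.write.replace-datanode-on-failure.* client settings; the snippet below only sketches how a writer might set them when running against a small test cluster, and is not taken from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: client-side settings that control pipeline recovery when datanodes die.
// On tiny test clusters the policy often cannot find a replacement node, which is when
// "All datanodes [...] are bad. Aborting..." shows up.
public class PipelineFailureConfigSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:37681");   // NameNode port from the log
        conf.setInt("dfs.replication", 2);                    // matches "to reach 2" in the warnings above
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        try (FileSystem fs = FileSystem.get(conf);
             FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-sketch.txt"))) {
            out.writeBytes("probe write\n");
            out.hsync();   // forces the write pipeline to be exercised
        }
    }
}
```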
2024-12-03T18:55:35,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/36bbcc6c3bb64f4aa817826c86b5c400 2024-12-03T18:55:35,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/36bbcc6c3bb64f4aa817826c86b5c400 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/36bbcc6c3bb64f4aa817826c86b5c400 2024-12-03T18:55:35,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/36bbcc6c3bb64f4aa817826c86b5c400, entries=1, sequenceid=45, filesize=5.9 K 2024-12-03T18:55:35,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ffcdd949a24b91f4996ce458c5691d39 in 442ms, sequenceid=45, compaction requested=false 2024-12-03T18:55:35,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:35,031 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-03T18:55:35,031 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:35,032 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7 because midkey is the same as first or last row 2024-12-03T18:55:35,075 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:35,076 WARN [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]] 2024-12-03T18:55:35,076 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C46359%2C1733252111199:(num 1733252133056) roll requested 2024-12-03T18:55:35,076 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C46359%2C1733252111199.1733252135076 2024-12-03T18:55:35,081 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:35,081 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:35,081 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741873_1056 2024-12-03T18:55:35,082 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:35,084 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:35,085 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:35,085 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741874_1057 2024-12-03T18:55:35,086 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:35,088 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:35,088 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:35,088 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741875_1058 2024-12-03T18:55:35,089 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:35,092 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:35,092 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36460 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741876_1059 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:35,092 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:35,092 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741876_1059 2024-12-03T18:55:35,092 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36460 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T18:55:35,092 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36460 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36460 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:35,093 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:35,094 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:35,094 WARN [IPC Server handler 1 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:35,094 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:35,097 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:35,097 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:35,097 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:35,097 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:35,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:35,098 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252133056 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252135076 2024-12-03T18:55:35,099 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44371:44371)] 2024-12-03T18:55:35,099 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 is not closed yet, will try archiving it next time 
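After the roll above, the new WAL is running on a single-replica pipeline ([(127.0.0.1/127.0.0.1:44371:44371)]) and the same few datanode addresses keep being excluded. When triaging a run like this it can help to count, per address, how often the client excluded a datanode; the small standalone parser below is an assumed helper that reads a saved copy of this log (run as `java ExcludedDatanodeCounter /path/to/test.log`) and does only that.

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Assumed triage helper: counts "Excluding datanode DatanodeInfoWithStorage[host:port,...]"
// occurrences per datanode address in a saved test log.
public class ExcludedDatanodeCounter {
    private static final Pattern EXCLUDE =
        Pattern.compile("Excluding datanode DatanodeInfoWithStorage\\[([^,]+),");

    public static void main(String[] args) throws IOException {
        Map<String, Integer> counts = new TreeMap<>();
        try (BufferedReader r = Files.newBufferedReader(Paths.get(args[0]))) {
            String line;
            while ((line = r.readLine()) != null) {
                Matcher m = EXCLUDE.matcher(line);
                while (m.find()) {
                    counts.merge(m.group(1), 1, Integer::sum);   // key is host:port, e.g. 127.0.0.1:38523
                }
            }
        }
        counts.forEach((addr, n) -> System.out.println(addr + " excluded " + n + " time(s)"));
    }
}
```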
2024-12-03T18:55:35,100 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252133056 is not closed yet, will try archiving it next time 2024-12-03T18:55:35,100 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252129032 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs/db5a5ccf5be8%2C46359%2C1733252111199.1733252129032 2024-12-03T18:55:35,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741857_1040 (size=13591) 2024-12-03T18:55:35,307 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:35,502 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 is not closed yet, will try archiving it next time 2024-12-03T18:55:36,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46359 {}] regionserver.HRegion(8855): Flush requested on ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:36,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffcdd949a24b91f4996ce458c5691d39 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T18:55:36,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/1bbe4f46127641389cdfa6e89b127ba9 is 1079, key is tmprow/info:/1733252136027/Put/seqid=0 2024-12-03T18:55:36,040 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:36,040 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:36,040 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741878_1061 2024-12-03T18:55:36,041 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:36,043 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:36,043 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36492 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741879_1062 to mirror 127.0.0.1:36129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:36,043 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:36,044 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741879_1062 2024-12-03T18:55:36,044 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36492 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:36,044 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36492 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36492 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:36,044 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:36,046 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40429 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:36,046 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36502 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741880_1063 to mirror 127.0.0.1:40429 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:36,047 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:36,047 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36502 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:36,047 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741880_1063 2024-12-03T18:55:36,047 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36502 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36502 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:36,048 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:36,050 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44945 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:36,050 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36518 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741881_1064 to mirror 127.0.0.1:44945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:36,051 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:36,051 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741881_1064 2024-12-03T18:55:36,051 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36518 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:36,051 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36518 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36518 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:36,051 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:36,052 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:36,052 WARN [IPC Server handler 1 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:36,052 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:36,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741882_1065 (size=6027) 2024-12-03T18:55:36,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/1bbe4f46127641389cdfa6e89b127ba9 2024-12-03T18:55:36,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/1bbe4f46127641389cdfa6e89b127ba9 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/1bbe4f46127641389cdfa6e89b127ba9 2024-12-03T18:55:36,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/1bbe4f46127641389cdfa6e89b127ba9, entries=1, sequenceid=55, filesize=5.9 K 2024-12-03T18:55:36,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for ffcdd949a24b91f4996ce458c5691d39 in 445ms, sequenceid=55, compaction requested=true 2024-12-03T18:55:36,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:36,475 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-03T18:55:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7 because midkey is the same as first or last row 2024-12-03T18:55:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ffcdd949a24b91f4996ce458c5691d39:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:55:36,476 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:55:36,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:55:36,478 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:55:36,478 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1541): ffcdd949a24b91f4996ce458c5691d39/info is initiating minor compaction (all files) 2024-12-03T18:55:36,478 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ffcdd949a24b91f4996ce458c5691d39/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 
2024-12-03T18:55:36,478 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/36bbcc6c3bb64f4aa817826c86b5c400, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/1bbe4f46127641389cdfa6e89b127ba9] into tmpdir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp, totalSize=29.3 K 2024-12-03T18:55:36,479 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting e5666c7d62914286ab14a862b04d02d7, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733252127070 2024-12-03T18:55:36,479 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting 36bbcc6c3bb64f4aa817826c86b5c400, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733252134588 2024-12-03T18:55:36,480 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1bbe4f46127641389cdfa6e89b127ba9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733252136027 2024-12-03T18:55:36,499 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ffcdd949a24b91f4996ce458c5691d39#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:55:36,500 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/04db6e607b2a4790b7ac8e995dd19397 is 1080, key is row0002/info:/1733252127070/Put/seqid=0 2024-12-03T18:55:36,501 WARN [Thread-974 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:36,501 WARN [Thread-974 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:36,501 WARN [Thread-974 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741883_1066 2024-12-03T18:55:36,502 WARN [Thread-974 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:36,503 WARN [Thread-974 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:36,503 WARN [Thread-974 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:36,503 WARN [Thread-974 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741884_1067 2024-12-03T18:55:36,503 WARN [Thread-974 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:36,505 WARN [Thread-974 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:36,505 WARN [Thread-974 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:36,505 WARN [Thread-974 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741885_1068 2024-12-03T18:55:36,505 WARN [Thread-974 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:36,507 WARN [Thread-974 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40429 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:36,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36536 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741886_1069 to mirror 127.0.0.1:40429 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:36,508 WARN [Thread-974 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]) is bad. 2024-12-03T18:55:36,508 WARN [Thread-974 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741886_1069 2024-12-03T18:55:36,508 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36536 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:36,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:36536 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36536 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:36,508 WARN [Thread-974 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK] 2024-12-03T18:55:36,509 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T18:55:36,509 WARN [IPC Server handler 1 on default port 37681 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T18:55:36,509 WARN [IPC Server handler 1 on default port 37681 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T18:55:36,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741887_1070 (size=18097) 2024-12-03T18:55:36,922 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ef50e6e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741872_1055 to 127.0.0.1:40429 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
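(Annotation, not part of the captured log.) The BlockPlacementPolicyDefault warnings above ask for DEBUG logging on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology before the failed replica placement can be diagnosed further. A minimal sketch of raising just those two levels at runtime, assuming Log4j 2 (log4j-core) is the logging backend of the test JVM; the class and method names below are illustrative and not taken from the test code:

    // Sketch only: raises DEBUG on the two loggers named in the warning above.
    // Assumes log4j-core is on the classpath.
    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnablePlacementDebug {
        public static void main(String[] args) {
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }

The equivalent change can of course also be made in the logger configuration file used by the run instead of programmatically.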
2024-12-03T18:55:36,922 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@aca8b68[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741867_1050 to 127.0.0.1:44945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:36,927 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/04db6e607b2a4790b7ac8e995dd19397 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397 2024-12-03T18:55:36,935 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ffcdd949a24b91f4996ce458c5691d39/info of ffcdd949a24b91f4996ce458c5691d39 into 04db6e607b2a4790b7ac8e995dd19397(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:36,935 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39., storeName=ffcdd949a24b91f4996ce458c5691d39/info, priority=13, startTime=1733252136476; duration=0sec 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397 because midkey is the same as first or last row 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397 because midkey is the same as first or last row 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397 because midkey is the same as first or last row 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:55:36,935 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ffcdd949a24b91f4996ce458c5691d39:info 2024-12-03T18:55:36,962 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:37,100 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:37,100 WARN [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-03T18:55:37,268 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:37,272 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:37,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:37,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:37,273 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:55:37,273 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@441dcfc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:37,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@381443d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:37,308 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:37,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14b00457{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/java.io.tmpdir/jetty-localhost-46653-hadoop-hdfs-3_4_1-tests_jar-_-any-5808730576048057473/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:37,364 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22c6c03b{HTTP/1.1, (http/1.1)}{localhost:46653} 2024-12-03T18:55:37,364 INFO [Time-limited test {}] server.Server(415): Started @135240ms 2024-12-03T18:55:37,365 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:37,794 WARN [Thread-993 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:55:37,804 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9073cedecec659e2 with lease ID 0xe29c90a44304063f: from storage DS-faa8c47c-a6f5-4610-9c09-f1450308231b node DatanodeRegistration(127.0.0.1:37155, datanodeUuid=ff2ed10a-1a62-4254-b7ab-13ac5358207a, infoPort=44423, infoSecurePort=0, ipcPort=33585, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:55:37,804 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9073cedecec659e2 with lease ID 0xe29c90a44304063f: from storage DS-73705ed8-497c-456d-9e1b-d162725e229f node DatanodeRegistration(127.0.0.1:37155, datanodeUuid=ff2ed10a-1a62-4254-b7ab-13ac5358207a, infoPort=44423, infoSecurePort=0, ipcPort=33585, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T18:55:37,921 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@aca8b68[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741857_1040 to 127.0.0.1:36129 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:37,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741882_1065 (size=6027) 2024-12-03T18:55:38,963 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:39,101 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:39,308 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:39,922 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ef50e6e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741887_1070 to 127.0.0.1:38523 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:40,963 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:41,011 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T18:55:41,101 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:41,309 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
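(Annotation, not part of the captured log.) The recurring "All datanodes [...] are bad. Aborting..." entries from FSHLog come from DataStreamer pipeline recovery running out of healthy replacement datanodes, which is the scenario this test (testLogRollOnDatanodeDeath) exercises. For a small cluster where aborting the stream is too strict, the HDFS client exposes the replace-datanode-on-failure settings; the sketch below shows a client Configuration with a more forgiving recovery policy. The values are illustrative only, the configuration actually used by this run is not shown in the log:

    // Sketch only: client-side settings that govern datanode replacement when a
    // write pipeline loses nodes. Values are illustrative, not taken from this run.
    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConf {
        public static Configuration relaxedClientConf() {
            Configuration conf = new Configuration();
            // Keep replacement enabled, but fall back to best effort instead of
            // failing the stream when no replacement datanode can be found.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }
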
2024-12-03T18:55:41,993 ERROR [FSHLog-0-hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData-prefix:db5a5ccf5be8,39725,1733252111031 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:41,993 WARN [FSHLog-0-hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData-prefix:db5a5ccf5be8,39725,1733252111031 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:41,994 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C39725%2C1733252111031:(num 1733252111735) roll requested 2024-12-03T18:55:41,994 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C39725%2C1733252111031.1733252141994 2024-12-03T18:55:41,999 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44945 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:41,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41548 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data4]'}, localName='127.0.0.1:37155', datanodeUuid='ff2ed10a-1a62-4254-b7ab-13ac5358207a', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741888_1071 to mirror 127.0.0.1:44945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:41,999 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:41,999 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741888_1071 2024-12-03T18:55:42,000 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41548 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T18:55:42,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41548 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:37155:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41548 dst: /127.0.0.1:37155 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:42,000 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:42,003 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:42,003 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:42,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41564 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741889_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data4]'}, localName='127.0.0.1:37155', datanodeUuid='ff2ed10a-1a62-4254-b7ab-13ac5358207a', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741889_1072 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:42,003 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741889_1072 2024-12-03T18:55:42,003 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41564 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741889_1072] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T18:55:42,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41564 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741889_1072] {}] datanode.DataXceiver(331): 127.0.0.1:37155:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41564 dst: /127.0.0.1:37155 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:42,004 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:42,008 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:42,008 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:42,008 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:42,008 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:42,008 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:42,009 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252141994 2024-12-03T18:55:42,009 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:42,009 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:42,009 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 2024-12-03T18:55:42,009 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44371:44371),(127.0.0.1/127.0.0.1:44423:44423)] 2024-12-03T18:55:42,009 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 is not closed yet, will try archiving it next time 2024-12-03T18:55:42,009 WARN [IPC Server handler 3 on default port 37681 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1006 2024-12-03T18:55:42,010 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 after 1ms 2024-12-03T18:55:42,964 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:43,102 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:44,964 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:45,103 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:46,012 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 after 4003ms 2024-12-03T18:55:46,964 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:47,103 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:47,816 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5b21038d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:36129,null,null]) java.net.ConnectException: Call From db5a5ccf5be8/172.17.0.2 to localhost:44411 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-03T18:55:47,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741833_1019 (size=455) 2024-12-03T18:55:48,062 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs/db5a5ccf5be8%2C46359%2C1733252111199.1733252112239 2024-12-03T18:55:48,064 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252133056 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs/db5a5ccf5be8%2C46359%2C1733252111199.1733252133056 2024-12-03T18:55:48,797 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3b0efcc2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37155, datanodeUuid=ff2ed10a-1a62-4254-b7ab-13ac5358207a, infoPort=44423, infoSecurePort=0, ipcPort=33585, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741833_1019 to 127.0.0.1:38523 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:48,965 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:49,104 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:50,848 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C46359%2C1733252111199.1733252150847 2024-12-03T18:55:50,851 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
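The RecoverLeaseFSUtils messages above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 1ms", "attempt=1 ... after 4003ms") come from a retry loop that keeps asking the NameNode to release the lease on the old master WAL so it can be closed and archived. A rough sketch of that pattern using the public DistributedFileSystem API is shown below; the timeout and backoff values are assumptions, not HBase's actual pause schedule.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        // Sketch: retry lease recovery on an old WAL until the NameNode reports it closed.
        public static boolean recoverWalLease(DistributedFileSystem dfs, Path oldWal)
                throws Exception {
            long deadline = System.currentTimeMillis() + 60_000L; // assumed overall timeout
            long pauseMs = 1_000L;                                // assumed starting pause
            while (System.currentTimeMillis() < deadline) {
                // recoverLease returns true once the lease is released and the file is closed.
                if (dfs.recoverLease(oldWal)) {
                    return true;
                }
                Thread.sleep(pauseMs);
                pauseMs = Math.min(pauseMs * 2, 4_000L);          // assumed backoff cap
            }
            return dfs.isFileClosed(oldWal);
        }
    }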
2024-12-03T18:55:50,851 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:50,851 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741891_1075 2024-12-03T18:55:50,852 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:50,855 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:50,855 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41576 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data4]'}, localName='127.0.0.1:37155', datanodeUuid='ff2ed10a-1a62-4254-b7ab-13ac5358207a', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741892_1076 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:50,855 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:50,855 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41576 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T18:55:50,855 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741892_1076 2024-12-03T18:55:50,855 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1359548398_22 at /127.0.0.1:41576 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:37155:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41576 dst: /127.0.0.1:37155 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:50,856 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:50,857 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:50,858 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:50,858 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741893_1077 2024-12-03T18:55:50,858 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:50,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:50,863 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:50,863 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:50,863 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:50,863 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:50,863 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252135076 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252150847 2024-12-03T18:55:50,864 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44423:44423),(127.0.0.1/127.0.0.1:44371:44371)] 2024-12-03T18:55:50,864 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.1733252135076 is not closed yet, will try archiving it next time 2024-12-03T18:55:50,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741877_1060 (size=12911) 2024-12-03T18:55:50,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46359 {}] regionserver.HRegion(8855): Flush requested on ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:50,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffcdd949a24b91f4996ce458c5691d39 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T18:55:50,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/8d35a366cd04493da8d5bf339d3cd8d6 is 1080, key is row0013/info:/1733252150866/Put/seqid=0 2024-12-03T18:55:50,875 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:50,876 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741895_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:50,876 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741895_1079 2024-12-03T18:55:50,876 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:50,879 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44945 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:50,879 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:41596 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741896_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data4]'}, localName='127.0.0.1:37155', datanodeUuid='ff2ed10a-1a62-4254-b7ab-13ac5358207a', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741896_1080 to mirror 127.0.0.1:44945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:50,879 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:50,879 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741896_1080 2024-12-03T18:55:50,879 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:41596 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741896_1080] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:50,879 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:41596 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741896_1080] {}] datanode.DataXceiver(331): 127.0.0.1:37155:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41596 dst: /127.0.0.1:37155 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
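Each "Abandoning ..." / "Excluding datanode ..." pair in this stretch means the block being written for the flush output lost a pipeline node and was re-allocated without it; the addStoredBlock lines that follow show where the replicas finally landed. After the fact, replica placement for a finished file can be inspected through the public block-location API, as in this illustrative sketch (the file path and any expectations about replica count are assumptions):

    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReplicaPlacementCheck {
        // Sketch: print which datanodes actually hold each block of a file.
        public static void printLocations(FileSystem fs, Path file) throws Exception {
            FileStatus status = fs.getFileStatus(file);
            BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
            for (BlockLocation block : blocks) {
                System.out.println("offset=" + block.getOffset()
                    + " length=" + block.getLength()
                    + " replicas=" + String.join(",", block.getNames()));
            }
        }
    }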
2024-12-03T18:55:50,880 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:50,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741897_1081 (size=8190) 2024-12-03T18:55:50,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741897_1081 (size=8190) 2024-12-03T18:55:50,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/8d35a366cd04493da8d5bf339d3cd8d6 2024-12-03T18:55:50,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/8d35a366cd04493da8d5bf339d3cd8d6 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/8d35a366cd04493da8d5bf339d3cd8d6 2024-12-03T18:55:50,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/8d35a366cd04493da8d5bf339d3cd8d6, entries=3, sequenceid=66, filesize=8.0 K 2024-12-03T18:55:50,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for ffcdd949a24b91f4996ce458c5691d39 in 32ms, sequenceid=66, compaction requested=false 2024-12-03T18:55:50,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:50,902 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-12-03T18:55:50,902 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:50,902 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397 because midkey is the same as first or last row 2024-12-03T18:55:50,965 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46359 {}] regionserver.HRegion(8855): Flush requested on ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:51,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ffcdd949a24b91f4996ce458c5691d39 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-03T18:55:51,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/4d1285d8386941d889c7464b5fa9c72d is 1080, key is row0015/info:/1733252150870/Put/seqid=0 2024-12-03T18:55:51,101 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,101 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:51,101 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741898_1082 2024-12-03T18:55:51,102 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:51,104 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,104 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:41630 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741899_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data4]'}, localName='127.0.0.1:37155', datanodeUuid='ff2ed10a-1a62-4254-b7ab-13ac5358207a', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741899_1083 to mirror 127.0.0.1:36129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,104 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-03T18:55:51,104 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741899_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 
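"LowReplication-Roller was enabled" above indicates that FSHLog noticed the current WAL's pipeline had fewer live replicas than it tolerates and scheduled a roll onto a fresh file. The sketch below shows the configuration keys believed to drive that decision in FSHLog; treat both the key choice and the values as assumptions for illustration, not the settings of this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LowReplicationRollSettings {
        // Sketch: low-replication roll knobs; values are illustrative assumptions only.
        public static Configuration configure() {
            Configuration conf = HBaseConfiguration.create();
            // Roll the WAL once the pipeline has fewer live replicas than this.
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Stop re-rolling for low replication after this many consecutive attempts.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            // Independent time-based roll period, in milliseconds.
            conf.setLong("hbase.regionserver.logroll.period", 3_600_000L);
            return conf;
        }
    }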
2024-12-03T18:55:51,104 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741899_1083 2024-12-03T18:55:51,104 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:41630 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741899_1083] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:51,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:41630 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741899_1083] {}] datanode.DataXceiver(331): 127.0.0.1:37155:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41630 dst: /127.0.0.1:37155 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,105 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:51,107 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1084 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
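The "Flush requested on ffcdd949a24b91f4996ce458c5691d39" entries in this stretch are memstore flushes triggered by write pressure; the same flush can also be requested explicitly through the client Admin API. A hedged sketch follows, with the table name taken from the data path in the log and everything else assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ManualFlush {
        // Sketch: flush the test table's memstores explicitly instead of waiting for
        // the MemStoreFlusher to react to size pressure.
        public static void flushTable() throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
            }
        }
    }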
2024-12-03T18:55:51,107 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34282 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741900_1084] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741900_1084 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,107 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741900_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:51,107 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741900_1084 2024-12-03T18:55:51,108 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34282 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741900_1084] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:51,108 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34282 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741900_1084] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34282 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,108 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:51,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741901_1085 (size=14660) 2024-12-03T18:55:51,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741901_1085 (size=14660) 2024-12-03T18:55:51,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/4d1285d8386941d889c7464b5fa9c72d 2024-12-03T18:55:51,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/4d1285d8386941d889c7464b5fa9c72d as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4d1285d8386941d889c7464b5fa9c72d 2024-12-03T18:55:51,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4d1285d8386941d889c7464b5fa9c72d, entries=9, sequenceid=79, filesize=14.3 K 2024-12-03T18:55:51,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for ffcdd949a24b91f4996ce458c5691d39 in 42ms, sequenceid=79, compaction requested=true 2024-12-03T18:55:51,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:51,135 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-12-03T18:55:51,135 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:51,135 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397 because midkey is the same as first or last row 2024-12-03T18:55:51,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store ffcdd949a24b91f4996ce458c5691d39:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:55:51,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:55:51,136 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:55:51,137 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:55:51,137 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1541): ffcdd949a24b91f4996ce458c5691d39/info is initiating minor compaction (all files) 2024-12-03T18:55:51,137 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ffcdd949a24b91f4996ce458c5691d39/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:51,137 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/8d35a366cd04493da8d5bf339d3cd8d6, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4d1285d8386941d889c7464b5fa9c72d] into tmpdir=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp, totalSize=40.0 K 2024-12-03T18:55:51,138 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04db6e607b2a4790b7ac8e995dd19397, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733252127070 2024-12-03T18:55:51,138 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8d35a366cd04493da8d5bf339d3cd8d6, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1733252137052 2024-12-03T18:55:51,138 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4d1285d8386941d889c7464b5fa9c72d, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733252150870 2024-12-03T18:55:51,152 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ffcdd949a24b91f4996ce458c5691d39#info#compaction#27 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:55:51,153 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/48b64ea509fc4e84bf1608b9897fbca1 is 1080, key is row0002/info:/1733252127070/Put/seqid=0 2024-12-03T18:55:51,155 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,155 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741902_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:51,155 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741902_1086 2024-12-03T18:55:51,156 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:51,158 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
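Annotator's note: the "Abandoning" / "Excluding datanode" warnings above repeat for every failed pipeline attempt, so counting them per datanode is a quick way to confirm which node the test killed. A hypothetical helper (not part of Hadoop or HBase) that tallies them from a log piped to stdin:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ExcludedDatanodeTally {
        public static void main(String[] args) throws Exception {
            // Captures the datanode address out of lines like
            // "Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-...,DISK]"
            Pattern p = Pattern.compile("Excluding datanode DatanodeInfoWithStorage\\[([^,]+),");
            Map<String, Integer> counts = new HashMap<>();
            try (BufferedReader in = new BufferedReader(
                    new InputStreamReader(System.in, StandardCharsets.UTF_8))) {
                String line;
                while ((line = in.readLine()) != null) {
                    Matcher m = p.matcher(line);
                    while (m.find()) {
                        counts.merge(m.group(1), 1, Integer::sum);
                    }
                }
            }
            counts.forEach((dn, n) -> System.out.println(dn + " excluded " + n + " time(s)"));
        }
    }

Usage would be something like `java ExcludedDatanodeTally < test.log` (the log file name is hypothetical).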
2024-12-03T18:55:51,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34320 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741903_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741903_1087 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,159 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741903_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:51,159 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741903_1087 2024-12-03T18:55:51,159 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34320 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741903_1087] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:51,159 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34320 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741903_1087] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34320 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,159 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:51,161 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,161 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741904_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 
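Annotator's note: the exclude-and-retry loop above is governed on the client side by the standard HDFS replace-datanode-on-failure settings. Whether this particular test overrides them is not visible in the log, so the snippet below is only a sketch of how a client could tune that behaviour; the property names are standard HDFS client configuration keys:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class PipelineRecoveryConfSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Client-side knobs for what the DataStreamer does when a pipeline
            // datanode fails; the log above shows the usual exclude-and-retry path.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            try (FileSystem fs = FileSystem.get(conf)) {
                System.out.println("default FS: " + fs.getUri());
            }
        }
    }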
2024-12-03T18:55:51,161 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741904_1088 2024-12-03T18:55:51,162 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:51,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741905_1089 (size=28989) 2024-12-03T18:55:51,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741905_1089 (size=28989) 2024-12-03T18:55:51,180 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/.tmp/info/48b64ea509fc4e84bf1608b9897fbca1 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/48b64ea509fc4e84bf1608b9897fbca1 2024-12-03T18:55:51,188 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ffcdd949a24b91f4996ce458c5691d39/info of ffcdd949a24b91f4996ce458c5691d39 into 48b64ea509fc4e84bf1608b9897fbca1(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ffcdd949a24b91f4996ce458c5691d39: 2024-12-03T18:55:51,188 INFO [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39., storeName=ffcdd949a24b91f4996ce458c5691d39/info, priority=13, startTime=1733252151135; duration=0sec 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/48b64ea509fc4e84bf1608b9897fbca1 because midkey is the same as first or last row 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/48b64ea509fc4e84bf1608b9897fbca1 because midkey is the same as first or last row 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/48b64ea509fc4e84bf1608b9897fbca1 because midkey is the same as first or last row 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:55:51,188 DEBUG [RS:0;db5a5ccf5be8:46359-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ffcdd949a24b91f4996ce458c5691d39:info 2024-12-03T18:55:51,294 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T18:55:51,294 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T18:55:51,294 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:51,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:51,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:51,294 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T18:55:51,295 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T18:55:51,295 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1263878371, stopped=false 2024-12-03T18:55:51,295 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db5a5ccf5be8,39725,1733252111031 2024-12-03T18:55:51,345 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:51,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:51,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:55:51,345 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:51,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:51,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:51,345 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): 
Stopping 2024-12-03T18:55:51,345 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T18:55:51,345 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:51,345 
DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:51,345 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db5a5ccf5be8,46359,1733252111199' ***** 2024-12-03T18:55:51,345 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T18:55:51,345 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db5a5ccf5be8,44293,1733252112865' ***** 2024-12-03T18:55:51,346 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T18:55:51,346 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T18:55:51,346 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T18:55:51,346 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T18:55:51,346 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T18:55:51,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:51,346 INFO [RS:1;db5a5ccf5be8:44293 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T18:55:51,346 INFO [RS:1;db5a5ccf5be8:44293 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T18:55:51,346 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(959): stopping server db5a5ccf5be8,44293,1733252112865 2024-12-03T18:55:51,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:51,346 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:55:51,346 INFO [RS:1;db5a5ccf5be8:44293 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;db5a5ccf5be8:44293. 
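Annotator's note: the "Shutting down minicluster" call stacks above originate in the test's tearDown. A minimal sketch of that teardown path follows; only the HBaseTestingUtil.shutdownMiniCluster call is confirmed by the stack trace, while the field, its initialisation and the JUnit annotation are assumptions:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class MiniClusterTearDownSketch {
        // Stand-in for the utility the real test holds; not the test's actual field.
        private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

        @After
        public void tearDown() throws Exception {
            // Mirrors the frames above:
            // AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster
            testUtil.shutdownMiniCluster();
        }
    }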
2024-12-03T18:55:51,347 DEBUG [RS:1;db5a5ccf5be8:44293 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:51,347 DEBUG [RS:1;db5a5ccf5be8:44293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:51,347 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:55:51,347 INFO [RS:0;db5a5ccf5be8:46359 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T18:55:51,347 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(976): stopping server db5a5ccf5be8,44293,1733252112865; all regions closed. 2024-12-03T18:55:51,347 INFO [RS:0;db5a5ccf5be8:46359 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T18:55:51,347 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(3091): Received CLOSE for ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:51,347 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,347 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(959): stopping server db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:51,347 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:55:51,347 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,347 INFO [RS:0;db5a5ccf5be8:46359 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db5a5ccf5be8:46359. 
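Annotator's note: the ZKWatcher entries above show each process reacting to the deletion of the /hbase/running znode, which is how cluster shutdown is broadcast. A simplified stand-in for that watch using the plain ZooKeeper client, with the quorum address taken from the log (the watcher logic itself is an illustration, not HBase's ZKWatcher):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch deleted = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper("127.0.0.1:53957", 30000, event -> {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted
                        && "/hbase/running".equals(event.getPath())) {
                    deleted.countDown();
                }
            });
            zk.exists("/hbase/running", true); // true = register the default watcher
            deleted.await();                   // unblocks once shutdown deletes the znode
            zk.close();
        }
    }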
2024-12-03T18:55:51,348 DEBUG [RS:0;db5a5ccf5be8:46359 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:55:51,348 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,348 DEBUG [RS:0;db5a5ccf5be8:46359 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:51,348 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,348 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T18:55:51,348 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T18:55:51,348 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T18:55:51,348 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,348 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T18:55:51,348 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ffcdd949a24b91f4996ce458c5691d39, disabling compactions & flushes 2024-12-03T18:55:51,349 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:51,349 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:51,349 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. after waiting 0 ms 2024-12-03T18:55:51,349 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 
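Annotator's note on the split-policy lines a few entries back: the compacted store file is 28,989 bytes (28.3 K per the block report), which exceeds the 16.0 K sizeToCheck, yet the region is still not split because StoreUtils reports the midkey equals the first or last row. The arithmetic, spelled out as a small sketch (not HBase's split-policy code):

    public class SplitCheckArithmetic {
        public static void main(String[] args) {
            long sumSize = 28989;          // compacted store file size from the log (28.3 K)
            long sizeToCheck = 16 * 1024;  // 16.0 K threshold from the log
            boolean bigEnough = sumSize > sizeToCheck;   // true: "Should split"
            boolean midKeyUsable = false;  // "midkey is the same as first or last row"
            System.out.println("split performed: " + (bigEnough && midKeyUsable)); // false
        }
    }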
2024-12-03T18:55:51,349 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T18:55:51,349 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, ffcdd949a24b91f4996ce458c5691d39=TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.} 2024-12-03T18:55:51,349 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ffcdd949a24b91f4996ce458c5691d39 2024-12-03T18:55:51,349 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:55:51,349 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:55:51,350 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:55:51,350 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:55:51,350 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:55:51,350 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-03T18:55:51,350 ERROR [FSHLog-0-hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6-prefix:db5a5ccf5be8,46359,1733252111199.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,350 WARN [FSHLog-0-hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6-prefix:db5a5ccf5be8,46359,1733252111199.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,351 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C46359%2C1733252111199.meta:.meta(num 1733252112663) roll requested 2024-12-03T18:55:51,351 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252151351.meta 2024-12-03T18:55:51,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,357 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,357 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 2024-12-03T18:55:51,357 WARN [IPC Server handler 1 on default port 37681 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 has not been closed. Lease recovery is in progress. 
RecoveryId = 1090 for block blk_1073741837_1013 2024-12-03T18:55:51,357 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 after 0ms 2024-12-03T18:55:51,360 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e99a68311ede435486014355f6dd200c, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4033dc9a7bae4fb9956f9c39e55e8645, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/868ad96ca8934859a6e00e6407b83e91, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/36bbcc6c3bb64f4aa817826c86b5c400, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/1bbe4f46127641389cdfa6e89b127ba9, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/8d35a366cd04493da8d5bf339d3cd8d6, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4d1285d8386941d889c7464b5fa9c72d] to archive 2024-12-03T18:55:51,362 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T18:55:51,367 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1091 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44945 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
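Annotator's note: the RecoverLeaseFSUtils entries above poll HDFS lease recovery on the old WAL of the stopped region server; the underlying primitive is DistributedFileSystem.recoverLease, as in this sketch (the WAL path is passed as an argument rather than hard-coded):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Path wal = new Path(args[0]); // e.g. the WAL file named in the log above
            FileSystem fs = wal.getFileSystem(conf);
            if (fs instanceof DistributedFileSystem) {
                // recoverLease() returns true once the file is closed; HBase's
                // RecoverLeaseFSUtils polls this in a loop, as "attempt=0" above shows.
                boolean closed = ((DistributedFileSystem) fs).recoverLease(wal);
                System.out.println("file closed after lease recovery: " + closed);
            }
        }
    }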
2024-12-03T18:55:51,367 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e99a68311ede435486014355f6dd200c to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e99a68311ede435486014355f6dd200c 2024-12-03T18:55:51,367 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741906_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:51,367 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741906_1091 2024-12-03T18:55:51,368 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:51,367 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34346 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741906_1091] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741906_1091 to mirror 127.0.0.1:44945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,369 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34346 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741906_1091] {}] datanode.BlockReceiver(316): Block 1073741906 has not released the reserved bytes. 
Releasing 268435456 bytes as part of close. 2024-12-03T18:55:51,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34346 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741906_1091] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34346 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,370 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4033dc9a7bae4fb9956f9c39e55e8645 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4033dc9a7bae4fb9956f9c39e55e8645 2024-12-03T18:55:51,370 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1092 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,370 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741907_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 
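Annotator's note: the HFileArchiver entries above and below move each compacted store file from the table's data directory to the same relative location under archive/. The mapping, illustrated with paths from the log (a sketch of the naming convention only, not HBase's actual HFileArchiver implementation):

    public class ArchivePathSketch {
        // Maps a store file path under the cluster root to its archive/ counterpart.
        static String toArchivePath(String rootDir, String storeFilePath) {
            String relative = storeFilePath.substring(rootDir.length());
            return rootDir + "/archive" + relative;
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6";
            String file = root + "/data/default/TestLogRolling-testLogRollOnDatanodeDeath"
                    + "/ffcdd949a24b91f4996ce458c5691d39/info/8d35a366cd04493da8d5bf339d3cd8d6";
            System.out.println(toArchivePath(root, file)); // matches the archive path in the log
        }
    }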
2024-12-03T18:55:51,370 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741907_1092 2024-12-03T18:55:51,371 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:51,372 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/e5666c7d62914286ab14a862b04d02d7 2024-12-03T18:55:51,374 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/868ad96ca8934859a6e00e6407b83e91 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/868ad96ca8934859a6e00e6407b83e91 2024-12-03T18:55:51,376 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/36bbcc6c3bb64f4aa817826c86b5c400 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/36bbcc6c3bb64f4aa817826c86b5c400 2024-12-03T18:55:51,378 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/04db6e607b2a4790b7ac8e995dd19397 2024-12-03T18:55:51,380 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/1bbe4f46127641389cdfa6e89b127ba9 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/1bbe4f46127641389cdfa6e89b127ba9 2024-12-03T18:55:51,382 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/8d35a366cd04493da8d5bf339d3cd8d6 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/8d35a366cd04493da8d5bf339d3cd8d6 2024-12-03T18:55:51,384 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4d1285d8386941d889c7464b5fa9c72d to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/info/4d1285d8386941d889c7464b5fa9c72d 2024-12-03T18:55:51,384 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db5a5ccf5be8:39725 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-03T18:55:51,385 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e99a68311ede435486014355f6dd200c=10347, 4033dc9a7bae4fb9956f9c39e55e8645=12506, e5666c7d62914286ab14a862b04d02d7=17994, 868ad96ca8934859a6e00e6407b83e91=6027, 36bbcc6c3bb64f4aa817826c86b5c400=6027, 04db6e607b2a4790b7ac8e995dd19397=18097, 1bbe4f46127641389cdfa6e89b127ba9=6027, 8d35a366cd04493da8d5bf339d3cd8d6=8190, 4d1285d8386941d889c7464b5fa9c72d=14660] 2024-12-03T18:55:51,401 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,401 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,401 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,402 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,402 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,402 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252151351.meta 2024-12-03T18:55:51,403 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,403 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:55:51,403 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta 2024-12-03T18:55:51,403 WARN [IPC Server handler 0 on default port 37681 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta has not been closed. Lease recovery is in progress. RecoveryId = 1094 for block blk_1073741834_1010 2024-12-03T18:55:51,404 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta after 1ms 2024-12-03T18:55:51,410 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/default/TestLogRolling-testLogRollOnDatanodeDeath/ffcdd949a24b91f4996ce458c5691d39/recovered.edits/83.seqid, newMaxSeqId=83, maxSeqId=1 2024-12-03T18:55:51,411 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 2024-12-03T18:55:51,411 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ffcdd949a24b91f4996ce458c5691d39: Waiting for close lock at 1733252151348Running coprocessor pre-close hooks at 1733252151348Disabling compacts and flushes for region at 1733252151348Disabling writes for close at 1733252151349 (+1 ms)Writing region close event to WAL at 1733252151400 (+51 ms)Running coprocessor post-close hooks at 1733252151411 (+11 ms)Closed at 1733252151411 2024-12-03T18:55:51,411 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39. 
2024-12-03T18:55:51,412 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44423:44423),(127.0.0.1/127.0.0.1:44371:44371)] 2024-12-03T18:55:51,412 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta is not closed yet, will try archiving it next time 2024-12-03T18:55:51,433 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/info/05944f98ab5a48e49dfa4c0a67b5387b is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733252112988.ffcdd949a24b91f4996ce458c5691d39./info:regioninfo/1733252113355/Put/seqid=0 2024-12-03T18:55:51,436 WARN [Thread-1068 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741909_1095 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,436 WARN [Thread-1068 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741909_1095 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:51,436 WARN [Thread-1068 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741909_1095 2024-12-03T18:55:51,438 WARN [Thread-1068 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:51,440 WARN [Thread-1068 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741910_1096 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,440 WARN [Thread-1068 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741910_1096 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:51,440 WARN [Thread-1068 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741910_1096 2024-12-03T18:55:51,441 WARN [Thread-1068 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:51,443 WARN [Thread-1068 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741911_1097 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,443 WARN [Thread-1068 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741911_1097 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 
2024-12-03T18:55:51,443 WARN [Thread-1068 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741911_1097 2024-12-03T18:55:51,444 WARN [Thread-1068 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:51,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741912_1098 (size=7089) 2024-12-03T18:55:51,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741912_1098 (size=7089) 2024-12-03T18:55:51,455 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/info/05944f98ab5a48e49dfa4c0a67b5387b 2024-12-03T18:55:51,483 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/ns/534b3d7a968f4f1da371443c4b63ebab is 43, key is default/ns:d/1733252112749/Put/seqid=0 2024-12-03T18:55:51,485 WARN [Thread-1074 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741913_1099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,486 WARN [Thread-1074 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741913_1099 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK]) is bad. 2024-12-03T18:55:51,486 WARN [Thread-1074 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741913_1099 2024-12-03T18:55:51,486 WARN [Thread-1074 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44945,DS-65653d36-c9d6-40de-810f-8f14689aee01,DISK] 2024-12-03T18:55:51,488 WARN [Thread-1074 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741914_1100 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,488 WARN [Thread-1074 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741914_1100 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK], DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:51,488 WARN [Thread-1074 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741914_1100 2024-12-03T18:55:51,489 WARN [Thread-1074 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:51,497 WARN [Thread-1074 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741915_1101 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36129 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34378 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741915_1101] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741915_1101 to mirror 127.0.0.1:36129 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,497 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34378 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741915_1101] {}] datanode.BlockReceiver(316): Block 1073741915 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:51,497 WARN [Thread-1074 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741915_1101 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:51,497 WARN [Thread-1074 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741915_1101 2024-12-03T18:55:51,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34378 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741915_1101] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34378 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:51,498 WARN [Thread-1074 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:51,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741916_1102 (size=5153) 2024-12-03T18:55:51,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741916_1102 (size=5153) 2024-12-03T18:55:51,505 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/ns/534b3d7a968f4f1da371443c4b63ebab 2024-12-03T18:55:51,530 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/table/65f9a8588df14d2ebbe731fab49094b7 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733252113370/Put/seqid=0 2024-12-03T18:55:51,533 WARN [Thread-1081 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741917_1103 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38523 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34398 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741917_1103] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10]'}, localName='127.0.0.1:33593', datanodeUuid='ac0ce44c-5180-4526-a3b0-db40627349b1', xmitsInProgress=0}:Exception transferring block BP-619597313-172.17.0.2-1733252108905:blk_1073741917_1103 to mirror 127.0.0.1:38523 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,533 WARN [Thread-1081 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741917_1103 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33593,DS-49a6f974-45fa-4a38-a4e1-bea744d45f50,DISK], DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK]) is bad. 2024-12-03T18:55:51,533 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34398 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741917_1103] {}] datanode.BlockReceiver(316): Block 1073741917 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T18:55:51,533 WARN [Thread-1081 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741917_1103 2024-12-03T18:55:51,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037783469_22 at /127.0.0.1:34398 [Receiving block BP-619597313-172.17.0.2-1733252108905:blk_1073741917_1103] {}] datanode.DataXceiver(331): 127.0.0.1:33593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34398 dst: /127.0.0.1:33593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:51,534 WARN [Thread-1081 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38523,DS-415cd1d5-a105-4c92-b8f8-993dcb323415,DISK] 2024-12-03T18:55:51,535 WARN [Thread-1081 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741918_1104 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:55:51,535 WARN [Thread-1081 {}] hdfs.DataStreamer(1731): Error Recovery for BP-619597313-172.17.0.2-1733252108905:blk_1073741918_1104 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK], DatanodeInfoWithStorage[127.0.0.1:37155,DS-faa8c47c-a6f5-4610-9c09-f1450308231b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK]) is bad. 2024-12-03T18:55:51,535 WARN [Thread-1081 {}] hdfs.DataStreamer(1850): Abandoning BP-619597313-172.17.0.2-1733252108905:blk_1073741918_1104 2024-12-03T18:55:51,536 WARN [Thread-1081 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36129,DS-77519873-4acf-4a3b-8908-9fb3fa1b39ff,DISK] 2024-12-03T18:55:51,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741919_1105 (size=5424) 2024-12-03T18:55:51,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741919_1105 (size=5424) 2024-12-03T18:55:51,541 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/table/65f9a8588df14d2ebbe731fab49094b7 2024-12-03T18:55:51,549 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/info/05944f98ab5a48e49dfa4c0a67b5387b as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/info/05944f98ab5a48e49dfa4c0a67b5387b 2024-12-03T18:55:51,549 DEBUG [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T18:55:51,555 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/info/05944f98ab5a48e49dfa4c0a67b5387b, entries=10, sequenceid=11, filesize=6.9 K 2024-12-03T18:55:51,557 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/ns/534b3d7a968f4f1da371443c4b63ebab as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/ns/534b3d7a968f4f1da371443c4b63ebab 2024-12-03T18:55:51,563 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/ns/534b3d7a968f4f1da371443c4b63ebab, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T18:55:51,565 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/.tmp/table/65f9a8588df14d2ebbe731fab49094b7 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/table/65f9a8588df14d2ebbe731fab49094b7 2024-12-03T18:55:51,572 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/table/65f9a8588df14d2ebbe731fab49094b7, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T18:55:51,573 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 223ms, sequenceid=11, compaction requested=false 2024-12-03T18:55:51,578 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T18:55:51,579 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:55:51,579 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:55:51,579 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252151349Running coprocessor pre-close hooks at 1733252151349Disabling compacts and flushes for region at 1733252151349Disabling writes for close at 1733252151350 (+1 ms)Obtaining lock to block concurrent updates at 1733252151350Preparing flush snapshotting stores in 1588230740 at 1733252151350Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733252151350Flushing stores of hbase:meta,,1.1588230740 at 1733252151413 (+63 ms)Flushing 1588230740/info: creating writer at 1733252151413Flushing 1588230740/info: appending metadata at 1733252151433 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733252151433Flushing 1588230740/ns: creating writer at 1733252151463 (+30 ms)Flushing 1588230740/ns: appending metadata at 1733252151482 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733252151482Flushing 1588230740/table: creating writer at 1733252151510 (+28 ms)Flushing 1588230740/table: appending metadata at 1733252151529 (+19 ms)Flushing 1588230740/table: closing flushed file at 1733252151529Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@759693db: reopening flushed file at 1733252151548 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5aea5bc3: reopening flushed file at 1733252151556 (+8 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68db9805: reopening flushed file at 1733252151564 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 223ms, sequenceid=11, compaction requested=false at 1733252151574 (+10 ms)Writing region close event to WAL at 1733252151575 (+1 ms)Running coprocessor post-close hooks at 1733252151579 (+4 ms)Closed at 1733252151579 2024-12-03T18:55:51,580 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T18:55:51,750 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(976): stopping server db5a5ccf5be8,46359,1733252111199; all regions closed. 2024-12-03T18:55:51,750 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,750 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,750 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,751 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,751 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:51,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741908_1093 (size=825) 2024-12-03T18:55:51,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741908_1093 (size=825) 2024-12-03T18:55:51,921 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@ef50e6e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33593, datanodeUuid=ac0ce44c-5180-4526-a3b0-db40627349b1, infoPort=44371, infoSecurePort=0, ipcPort=42883, storageInfo=lv=-57;cid=testClusterID;nsid=1292028406;c=1733252108905):Failed to transfer BP-619597313-172.17.0.2-1733252108905:blk_1073741877_1060 to 127.0.0.1:38523 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:52,016 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T18:55:52,016 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T18:55:52,088 INFO [regionserver/db5a5ccf5be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:55:52,147 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T18:55:52,147 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T18:55:52,962 INFO [regionserver/db5a5ccf5be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:55:53,652 INFO [master/db5a5ccf5be8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T18:55:53,652 INFO [master/db5a5ccf5be8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-03T18:55:54,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741836_1012 (size=76) 2024-12-03T18:55:54,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:55:55,359 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 after 4002ms 2024-12-03T18:55:55,405 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta after 4002ms 2024-12-03T18:55:55,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:55:55,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:55:56,356 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-03T18:55:56,359 DEBUG [RS:1;db5a5ccf5be8:44293 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs 2024-12-03T18:55:56,360 INFO [RS:1;db5a5ccf5be8:44293 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C44293%2C1733252112865:(num 1733252113091) 2024-12-03T18:55:56,360 DEBUG [RS:1;db5a5ccf5be8:44293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:56,360 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:55:56,360 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:55:56,360 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.ChoreService(370): Chore service for: regionserver/db5a5ccf5be8:0 had [ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T18:55:56,361 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T18:55:56,361 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T18:55:56,361 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T18:55:56,361 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T18:55:56,361 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:55:56,361 INFO [RS:1;db5a5ccf5be8:44293 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44293 2024-12-03T18:55:56,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:55:56,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:55:56,376 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db5a5ccf5be8,44293,1733252112865 2024-12-03T18:55:56,376 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:55:56,387 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db5a5ccf5be8,44293,1733252112865] 2024-12-03T18:55:56,397 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db5a5ccf5be8,44293,1733252112865 already deleted, retry=false 2024-12-03T18:55:56,397 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db5a5ccf5be8,44293,1733252112865 expired; onlineServers=1 2024-12-03T18:55:56,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,487 INFO [RS:1;db5a5ccf5be8:44293 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:55:56,487 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:56,487 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44293-0x1019c8c56530002, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:56,487 INFO [RS:1;db5a5ccf5be8:44293 {}] regionserver.HRegionServer(1031): Exiting; stopping=db5a5ccf5be8,44293,1733252112865; zookeeper connection closed. 
2024-12-03T18:55:56,487 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10bb86a8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10bb86a8 2024-12-03T18:55:56,751 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-03T18:55:56,755 DEBUG [RS:0;db5a5ccf5be8:46359 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs 2024-12-03T18:55:56,755 INFO [RS:0;db5a5ccf5be8:46359 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C46359%2C1733252111199.meta:.meta(num 1733252151351) 2024-12-03T18:55:56,755 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:56,756 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:56,756 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:56,756 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:56,756 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:56,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741894_1078 (size=16308) 2024-12-03T18:55:56,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741894_1078 (size=16308) 2024-12-03T18:55:56,761 DEBUG [RS:0;db5a5ccf5be8:46359 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs 2024-12-03T18:55:56,761 INFO [RS:0;db5a5ccf5be8:46359 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C46359%2C1733252111199:(num 1733252150847) 2024-12-03T18:55:56,761 DEBUG [RS:0;db5a5ccf5be8:46359 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:55:56,761 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:55:56,761 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:55:56,761 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.ChoreService(370): Chore service for: regionserver/db5a5ccf5be8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T18:55:56,762 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:55:56,762 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T18:55:56,762 INFO [RS:0;db5a5ccf5be8:46359 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46359 2024-12-03T18:55:56,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:55:56,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db5a5ccf5be8,46359,1733252111199 2024-12-03T18:55:56,787 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:55:56,797 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db5a5ccf5be8,46359,1733252111199] 2024-12-03T18:55:56,807 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db5a5ccf5be8,46359,1733252111199 already deleted, retry=false 2024-12-03T18:55:56,807 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db5a5ccf5be8,46359,1733252111199 expired; onlineServers=0 2024-12-03T18:55:56,807 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db5a5ccf5be8,39725,1733252111031' ***** 2024-12-03T18:55:56,807 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T18:55:56,808 INFO [M:0;db5a5ccf5be8:39725 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:55:56,808 INFO [M:0;db5a5ccf5be8:39725 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:55:56,808 DEBUG [M:0;db5a5ccf5be8:39725 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T18:55:56,808 DEBUG [M:0;db5a5ccf5be8:39725 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T18:55:56,808 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T18:55:56,808 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252111993 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252111993,5,FailOnTimeoutGroup] 2024-12-03T18:55:56,808 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252111992 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252111992,5,FailOnTimeoutGroup] 2024-12-03T18:55:56,808 INFO [M:0;db5a5ccf5be8:39725 {}] hbase.ChoreService(370): Chore service for: master/db5a5ccf5be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T18:55:56,808 INFO [M:0;db5a5ccf5be8:39725 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:55:56,808 DEBUG [M:0;db5a5ccf5be8:39725 {}] master.HMaster(1795): Stopping service threads 2024-12-03T18:55:56,808 INFO [M:0;db5a5ccf5be8:39725 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T18:55:56,809 INFO [M:0;db5a5ccf5be8:39725 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:55:56,809 INFO [M:0;db5a5ccf5be8:39725 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T18:55:56,809 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T18:55:56,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T18:55:56,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:55:56,818 DEBUG [M:0;db5a5ccf5be8:39725 {}] zookeeper.ZKUtil(347): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T18:55:56,818 WARN [M:0;db5a5ccf5be8:39725 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T18:55:56,819 INFO [M:0;db5a5ccf5be8:39725 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/.lastflushedseqids 2024-12-03T18:55:56,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741920_1106 (size=130) 2024-12-03T18:55:56,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741920_1106 (size=130) 2024-12-03T18:55:56,827 INFO [M:0;db5a5ccf5be8:39725 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T18:55:56,827 INFO [M:0;db5a5ccf5be8:39725 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T18:55:56,827 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:55:56,827 INFO [M:0;db5a5ccf5be8:39725 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:56,827 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:56,827 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:55:56,827 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:56,828 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-12-03T18:55:56,847 DEBUG [M:0;db5a5ccf5be8:39725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09c6a7695ab944ed8b5d7d8fcd0e15e4 is 82, key is hbase:meta,,1/info:regioninfo/1733252112697/Put/seqid=0 2024-12-03T18:55:56,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741921_1107 (size=5672) 2024-12-03T18:55:56,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741921_1107 (size=5672) 2024-12-03T18:55:56,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:56,897 INFO [RS:0;db5a5ccf5be8:46359 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:55:56,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46359-0x1019c8c56530001, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:56,897 INFO [RS:0;db5a5ccf5be8:46359 {}] regionserver.HRegionServer(1031): Exiting; stopping=db5a5ccf5be8,46359,1733252111199; zookeeper connection closed. 2024-12-03T18:55:56,898 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@355be7ab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@355be7ab 2024-12-03T18:55:56,898 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-03T18:55:56,949 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:55:56,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:56,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:55:57,253 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09c6a7695ab944ed8b5d7d8fcd0e15e4 2024-12-03T18:55:57,278 DEBUG [M:0;db5a5ccf5be8:39725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f8efd6d74a6b4468ad4dc7c32cc20e4c is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733252113376/Put/seqid=0 2024-12-03T18:55:57,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741922_1108 (size=6254) 2024-12-03T18:55:57,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741922_1108 (size=6254) 2024-12-03T18:55:57,288 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f8efd6d74a6b4468ad4dc7c32cc20e4c 2024-12-03T18:55:57,293 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f8efd6d74a6b4468ad4dc7c32cc20e4c 2024-12-03T18:55:57,308 DEBUG [M:0;db5a5ccf5be8:39725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/94a618cddd0d4752afcb516b3fe7e30f is 69, key is db5a5ccf5be8,44293,1733252112865/rs:state/1733252112940/Put/seqid=0 2024-12-03T18:55:57,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741923_1109 (size=5224) 2024-12-03T18:55:57,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741923_1109 (size=5224) 2024-12-03T18:55:57,313 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), 
to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/94a618cddd0d4752afcb516b3fe7e30f 2024-12-03T18:55:57,334 DEBUG [M:0;db5a5ccf5be8:39725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/87313a9bcd254bc9ad0c15aebf065412 is 52, key is load_balancer_on/state:d/1733252112843/Put/seqid=0 2024-12-03T18:55:57,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741924_1110 (size=5056) 2024-12-03T18:55:57,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741924_1110 (size=5056) 2024-12-03T18:55:57,343 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/87313a9bcd254bc9ad0c15aebf065412 2024-12-03T18:55:57,349 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09c6a7695ab944ed8b5d7d8fcd0e15e4 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/09c6a7695ab944ed8b5d7d8fcd0e15e4 2024-12-03T18:55:57,355 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/09c6a7695ab944ed8b5d7d8fcd0e15e4, entries=8, sequenceid=60, filesize=5.5 K 2024-12-03T18:55:57,356 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f8efd6d74a6b4468ad4dc7c32cc20e4c as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f8efd6d74a6b4468ad4dc7c32cc20e4c 2024-12-03T18:55:57,364 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f8efd6d74a6b4468ad4dc7c32cc20e4c 2024-12-03T18:55:57,365 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f8efd6d74a6b4468ad4dc7c32cc20e4c, entries=6, sequenceid=60, filesize=6.1 K 2024-12-03T18:55:57,366 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/94a618cddd0d4752afcb516b3fe7e30f as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/94a618cddd0d4752afcb516b3fe7e30f 
2024-12-03T18:55:57,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:55:57,373 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/94a618cddd0d4752afcb516b3fe7e30f, entries=2, sequenceid=60, filesize=5.1 K 2024-12-03T18:55:57,374 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/87313a9bcd254bc9ad0c15aebf065412 as hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/87313a9bcd254bc9ad0c15aebf065412 2024-12-03T18:55:57,380 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/87313a9bcd254bc9ad0c15aebf065412, entries=1, sequenceid=60, filesize=4.9 K 2024-12-03T18:55:57,381 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 554ms, sequenceid=60, compaction requested=false 2024-12-03T18:55:57,389 INFO [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:55:57,389 DEBUG [M:0;db5a5ccf5be8:39725 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252156827Disabling compacts and flushes for region at 1733252156827Disabling writes for close at 1733252156827Obtaining lock to block concurrent updates at 1733252156828 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733252156828Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1733252156828Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733252156829 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733252156829Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733252156847 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733252156847Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733252157260 (+413 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733252157277 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733252157277Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733252157293 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733252157308 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733252157308Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733252157319 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733252157333 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733252157333Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46430f5f: reopening flushed file at 1733252157348 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1542be0: reopening flushed file at 1733252157355 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bc02dc6: reopening flushed file at 1733252157365 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23c2fd2b: reopening flushed file at 1733252157373 (+8 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 554ms, sequenceid=60, compaction requested=false at 1733252157381 (+8 ms)Writing region close event to WAL at 1733252157389 (+8 ms)Closed at 1733252157389 2024-12-03T18:55:57,391 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:57,392 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:57,392 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:57,392 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:57,392 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:55:57,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37155 is added to blk_1073741890_1073 (size=1045) 2024-12-03T18:55:57,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741890_1073 (size=1045) 2024-12-03T18:55:57,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:55:57,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741835_1011 (size=393) 2024-12-03T18:55:57,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33593 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:55:57,821 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@109b74a9 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-619597313-172.17.0.2-1733252108905:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:36129,null,null]) java.net.ConnectException: Call From db5a5ccf5be8/172.17.0.2 to localhost:44411 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-03T18:55:57,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T18:55:57,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:55:57,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T18:55:57,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T18:55:58,024 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/WALs/db5a5ccf5be8,39725,1733252111031/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/oldWALs/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 2024-12-03T18:55:58,034 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/MasterData/oldWALs/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735 to hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/oldWALs/db5a5ccf5be8%2C39725%2C1733252111031.1733252111735$masterlocalwal$ 2024-12-03T18:55:58,034 INFO [M:0;db5a5ccf5be8:39725 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T18:55:58,034 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T18:55:58,035 INFO [M:0;db5a5ccf5be8:39725 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39725 2024-12-03T18:55:58,035 INFO [M:0;db5a5ccf5be8:39725 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:55:58,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:58,245 INFO [M:0;db5a5ccf5be8:39725 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:55:58,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39725-0x1019c8c56530000, quorum=127.0.0.1:53957, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:55:58,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14b00457{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:58,251 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22c6c03b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:58,251 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:58,251 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@381443d3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:58,251 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@441dcfc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:58,253 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:58,253 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T18:55:58,253 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619597313-172.17.0.2-1733252108905 (Datanode Uuid ff2ed10a-1a62-4254-b7ab-13ac5358207a) service to localhost/127.0.0.1:37681 2024-12-03T18:55:58,253 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@70fbf936 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:36129,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44411 , LocalHost:localPort db5a5ccf5be8/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-03T18:55:58,253 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:58,253 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@70fbf936 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:37155,null,null]) java.io.IOException: No block pool offer service for bpid=BP-619597313-172.17.0.2-1733252108905 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:58,254 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@70fbf936 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:36129,null,null], DatanodeInfoWithStorage[127.0.0.1:37155,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-619597313-172.17.0.2-1733252108905:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:36129,null,null], DatanodeInfoWithStorage[127.0.0.1:37155,null,null]] 2024-12-03T18:55:58,254 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@70fbf936 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:36129,null,null]) java.io.IOException: No block pool offer service for bpid=BP-619597313-172.17.0.2-1733252108905 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:55:58,254 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@70fbf936 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37155,null,null]) java.io.IOException: No block pool offer service for bpid=BP-619597313-172.17.0.2-1733252108905 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:55:58,254 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data3/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:58,254 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@70fbf936 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:36129,null,null], DatanodeInfoWithStorage[127.0.0.1:37155,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-619597313-172.17.0.2-1733252108905:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:36129,null,null], DatanodeInfoWithStorage[127.0.0.1:37155,null,null]] 2024-12-03T18:55:58,254 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data4/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:58,255 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:58,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20faceaa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:58,257 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e1ad43e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:58,257 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:58,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47432b7b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:58,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@6b21f544{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:58,259 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:55:58,259 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T18:55:58,259 WARN [BP-619597313-172.17.0.2-1733252108905 heartbeating to localhost/127.0.0.1:37681 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-619597313-172.17.0.2-1733252108905 (Datanode Uuid ac0ce44c-5180-4526-a3b0-db40627349b1) service to localhost/127.0.0.1:37681 2024-12-03T18:55:58,259 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:55:58,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data9/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:58,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/cluster_4187d5fb-6995-465f-5c7c-0ffcd2fcb479/data/data10/current/BP-619597313-172.17.0.2-1733252108905 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:55:58,260 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:55:58,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75096fee{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:55:58,266 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@346c0162{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:55:58,266 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:55:58,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e26ba04{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:55:58,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac253d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir/,STOPPED} 2024-12-03T18:55:58,275 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T18:55:58,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T18:55:58,312 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath 
Thread=156 (was 81) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37681 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:37681 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44241 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37681 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37681 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37681 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37681 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44241 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37681 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37681 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007ff408bef5c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007ff408bef5c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:37681 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37681 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37681 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=124 (was 137), ProcessCount=11 (was 11), AvailableMemoryMB=6725 (was 6416) - AvailableMemoryMB LEAK? - 2024-12-03T18:55:58,318 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=124, ProcessCount=11, AvailableMemoryMB=6725 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.log.dir so I do NOT create it in target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4a60070-dd7e-802a-342e-5f20eae680fc/hadoop.tmp.dir so I do NOT create it in target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7, deleteOnExit=true 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/test.cache.data in system properties and HBase conf 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir in system properties and HBase conf 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T18:55:58,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T18:55:58,319 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/nfs.dump.dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/java.io.tmpdir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T18:55:58,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T18:55:58,330 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:55:58,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:55:58,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:55:58,620 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:58,623 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:58,625 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:58,625 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:58,625 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:55:58,625 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:58,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bf7054a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:58,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21865735{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:58,716 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60c4032c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/java.io.tmpdir/jetty-localhost-36301-hadoop-hdfs-3_4_1-tests_jar-_-any-4821985317438454777/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:55:58,717 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42369482{HTTP/1.1, (http/1.1)}{localhost:36301} 2024-12-03T18:55:58,717 INFO [Time-limited test {}] server.Server(415): Started @156593ms 2024-12-03T18:55:58,727 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:55:58,963 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:58,966 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:58,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:58,967 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:58,967 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:55:58,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719d6bc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:58,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a724f0c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:59,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@479334cf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/java.io.tmpdir/jetty-localhost-41691-hadoop-hdfs-3_4_1-tests_jar-_-any-569020858250297261/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:59,057 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d6935b5{HTTP/1.1, (http/1.1)}{localhost:41691} 2024-12-03T18:55:59,058 INFO [Time-limited test {}] server.Server(415): Started @156934ms 2024-12-03T18:55:59,059 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:59,083 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:55:59,086 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:55:59,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:55:59,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:55:59,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:55:59,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718cd5f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:55:59,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d751fec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:55:59,179 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63008d08{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/java.io.tmpdir/jetty-localhost-33515-hadoop-hdfs-3_4_1-tests_jar-_-any-17258295714590207272/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:55:59,179 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a20a16b{HTTP/1.1, (http/1.1)}{localhost:33515} 2024-12-03T18:55:59,179 INFO [Time-limited test {}] server.Server(415): Started @157056ms 2024-12-03T18:55:59,180 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:55:59,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:55:59,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:00,101 WARN [Thread-1219 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data1/current/BP-1677972484-172.17.0.2-1733252158340/current, will proceed with Du for space computation calculation, 2024-12-03T18:56:00,101 WARN [Thread-1220 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data2/current/BP-1677972484-172.17.0.2-1733252158340/current, will proceed with Du for space computation calculation, 2024-12-03T18:56:00,120 WARN [Thread-1183 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:56:00,122 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x21d949b088404601 with lease ID 0x841cd9ab2fcf6018: Processing first storage report for DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2 from datanode DatanodeRegistration(127.0.0.1:46217, datanodeUuid=bd6c022d-b2da-4b7e-9001-0af9fe78716f, infoPort=40745, infoSecurePort=0, ipcPort=35581, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340) 2024-12-03T18:56:00,122 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21d949b088404601 with lease ID 0x841cd9ab2fcf6018: from storage DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2 node DatanodeRegistration(127.0.0.1:46217, datanodeUuid=bd6c022d-b2da-4b7e-9001-0af9fe78716f, infoPort=40745, infoSecurePort=0, ipcPort=35581, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:00,122 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x21d949b088404601 with lease ID 0x841cd9ab2fcf6018: Processing first storage report for DS-d1305e60-28fd-493e-8e66-c5ee7a027671 from datanode DatanodeRegistration(127.0.0.1:46217, datanodeUuid=bd6c022d-b2da-4b7e-9001-0af9fe78716f, infoPort=40745, infoSecurePort=0, ipcPort=35581, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340) 2024-12-03T18:56:00,122 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x21d949b088404601 with lease ID 0x841cd9ab2fcf6018: from storage DS-d1305e60-28fd-493e-8e66-c5ee7a027671 node DatanodeRegistration(127.0.0.1:46217, datanodeUuid=bd6c022d-b2da-4b7e-9001-0af9fe78716f, infoPort=40745, infoSecurePort=0, ipcPort=35581, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:00,239 WARN [Thread-1230 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data3/current/BP-1677972484-172.17.0.2-1733252158340/current, will proceed with Du for space computation calculation, 2024-12-03T18:56:00,239 WARN [Thread-1231 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data4/current/BP-1677972484-172.17.0.2-1733252158340/current, will proceed with Du for space computation calculation, 2024-12-03T18:56:00,258 WARN [Thread-1206 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:56:00,261 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf2a5de3c14d71381 with lease ID 0x841cd9ab2fcf6019: Processing first storage report for DS-0094e978-a7b9-4c29-8fe8-d98674bcab81 from datanode DatanodeRegistration(127.0.0.1:38945, datanodeUuid=72a14fa2-1e94-48af-8932-1d0a79709813, infoPort=33109, infoSecurePort=0, ipcPort=43371, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340) 2024-12-03T18:56:00,261 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2a5de3c14d71381 with lease ID 0x841cd9ab2fcf6019: from storage DS-0094e978-a7b9-4c29-8fe8-d98674bcab81 node DatanodeRegistration(127.0.0.1:38945, datanodeUuid=72a14fa2-1e94-48af-8932-1d0a79709813, infoPort=33109, infoSecurePort=0, ipcPort=43371, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:00,261 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf2a5de3c14d71381 with lease ID 0x841cd9ab2fcf6019: Processing first storage report for DS-cf86c6ae-8be6-4d8f-bb27-7299f0ef92e9 from datanode DatanodeRegistration(127.0.0.1:38945, datanodeUuid=72a14fa2-1e94-48af-8932-1d0a79709813, infoPort=33109, infoSecurePort=0, ipcPort=43371, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340) 2024-12-03T18:56:00,261 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2a5de3c14d71381 with lease ID 0x841cd9ab2fcf6019: from storage DS-cf86c6ae-8be6-4d8f-bb27-7299f0ef92e9 node DatanodeRegistration(127.0.0.1:38945, datanodeUuid=72a14fa2-1e94-48af-8932-1d0a79709813, infoPort=33109, infoSecurePort=0, ipcPort=43371, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:00,313 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a 2024-12-03T18:56:00,318 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/zookeeper_0, clientPort=60968, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T18:56:00,320 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60968 2024-12-03T18:56:00,320 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:00,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:00,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:56:00,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:56:00,333 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7 with version=8 2024-12-03T18:56:00,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase-staging 2024-12-03T18:56:00,335 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:56:00,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:00,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:00,335 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:56:00,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:00,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:56:00,336 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T18:56:00,336 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:56:00,337 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45865 2024-12-03T18:56:00,338 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45865 connecting to ZooKeeper ensemble=127.0.0.1:60968 2024-12-03T18:56:00,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:00,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:458650x0, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:56:00,394 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45865-0x1019c8d16e80000 connected 2024-12-03T18:56:00,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:00,477 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:00,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:00,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:56:00,484 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7, hbase.cluster.distributed=false 2024-12-03T18:56:00,486 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:56:00,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45865 2024-12-03T18:56:00,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45865 2024-12-03T18:56:00,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45865 2024-12-03T18:56:00,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45865 2024-12-03T18:56:00,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45865 2024-12-03T18:56:00,504 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:56:00,504 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:00,504 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:00,504 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:56:00,504 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:00,504 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:56:00,504 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T18:56:00,504 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:56:00,505 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45949 2024-12-03T18:56:00,506 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45949 connecting to ZooKeeper ensemble=127.0.0.1:60968 2024-12-03T18:56:00,506 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:00,507 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:00,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459490x0, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:56:00,519 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459490x0, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:56:00,519 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45949-0x1019c8d16e80001 connected 2024-12-03T18:56:00,519 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T18:56:00,519 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T18:56:00,520 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T18:56:00,521 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:56:00,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45949 2024-12-03T18:56:00,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45949 2024-12-03T18:56:00,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45949 2024-12-03T18:56:00,524 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45949 2024-12-03T18:56:00,526 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45949 2024-12-03T18:56:00,535 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db5a5ccf5be8:45865 2024-12-03T18:56:00,536 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:00,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:56:00,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:56:00,547 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:00,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T18:56:00,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,558 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T18:56:00,558 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db5a5ccf5be8,45865,1733252160335 from backup master directory 2024-12-03T18:56:00,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:00,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:56:00,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:56:00,567 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:56:00,568 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:00,572 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/hbase.id] with ID: 3537b81e-b1c5-4891-8179-aae57e9f704b 2024-12-03T18:56:00,572 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/.tmp/hbase.id 2024-12-03T18:56:00,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:56:00,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:56:00,579 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/.tmp/hbase.id]:[hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/hbase.id] 2024-12-03T18:56:00,590 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:00,591 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T18:56:00,592 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
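The ZKUtil(113) entries around this point repeatedly report "Set watcher on znode that does not yet exist" for paths such as /hbase/running, /hbase/master and /hbase/acl. A minimal sketch of that general ZooKeeper pattern follows, written against the plain org.apache.zookeeper client rather than HBase's internal ZKUtil; the connect string, znode path and the sleep at the end are placeholders for illustration only (the test above runs its own MiniZooKeeperCluster on 127.0.0.1:60968).

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchMissingZnode {
  public static void main(String[] args) throws Exception {
    // Placeholder connect string; in the log above the ensemble is a test-local MiniZooKeeperCluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    Watcher created = (WatchedEvent event) ->
        // Fires once (e.g. type=NodeCreated) when the watched znode appears.
        System.out.println("Got " + event.getType() + " on " + event.getPath());

    // exists() returns null for an absent znode but still registers the watch,
    // which is the "Set watcher on znode that does not yet exist" case reported in the log.
    if (zk.exists("/hbase/running", created) == null) {
      System.out.println("/hbase/running not present yet; watch registered");
    }

    Thread.sleep(60_000); // keep the session open long enough to observe the event (illustrative)
    zk.close();
  }
}
```

Registering the watch before the znode exists is what lets a client be notified with a NodeCreated event once the node is written, which matches the type=NodeCreated, path=/hbase/running events that appear later in this log.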
2024-12-03T18:56:00,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:56:00,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:56:00,609 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:56:00,610 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T18:56:00,610 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:56:00,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:56:00,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:56:00,617 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store 2024-12-03T18:56:00,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:56:00,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:56:00,625 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:00,625 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:56:00,625 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:00,625 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:00,625 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:56:00,625 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:00,625 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
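The master:store descriptor spelled out in the entries above lists its families (info, proc, rs, state) together with their VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE attributes. As a sketch only, and not the internal MasterRegion code path, an equivalent descriptor for the first two families could be expressed through the public HBase client builders like this; the table name is illustrative.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // Illustrative table name; the real region in the log is the internal master:store table.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example:store"))
        // 'info': VERSIONS=3, IN_MEMORY=true, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc': VERSIONS=1, ROW bloom, 64 KB blocks, no block encoding
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build())
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}
```

The remaining attributes the log prints for these families (TTL 'FOREVER', MIN_VERSIONS '0', COMPRESSION 'NONE', BLOCKCACHE 'true', KEEP_DELETED_CELLS 'FALSE') correspond to the ColumnFamilyDescriptorBuilder defaults, so they are not set explicitly in this sketch.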
2024-12-03T18:56:00,625 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252160625Disabling compacts and flushes for region at 1733252160625Disabling writes for close at 1733252160625Writing region close event to WAL at 1733252160625Closed at 1733252160625 2024-12-03T18:56:00,626 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/.initializing 2024-12-03T18:56:00,626 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:00,629 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C45865%2C1733252160335, suffix=, logDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335, archiveDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/oldWALs, maxLogs=10 2024-12-03T18:56:00,629 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 2024-12-03T18:56:00,634 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 2024-12-03T18:56:00,635 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40745:40745),(127.0.0.1/127.0.0.1:33109:33109)] 2024-12-03T18:56:00,635 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:56:00,636 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:00,636 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,636 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,638 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T18:56:00,640 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:00,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T18:56:00,642 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:56:00,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T18:56:00,644 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,644 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:56:00,644 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T18:56:00,646 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:56:00,646 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,647 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,647 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,648 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,648 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,649 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T18:56:00,650 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:00,652 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:56:00,652 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849682, jitterRate=0.08042682707309723}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T18:56:00,652 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733252160636Initializing all the Stores at 1733252160636Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252160637 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252160638 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252160638Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252160638Cleaning up temporary data from old regions at 1733252160648 (+10 ms)Region opened successfully at 1733252160652 (+4 ms) 2024-12-03T18:56:00,653 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T18:56:00,656 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e663da3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:56:00,657 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T18:56:00,657 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T18:56:00,657 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T18:56:00,657 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T18:56:00,657 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T18:56:00,658 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T18:56:00,658 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T18:56:00,660 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T18:56:00,661 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T18:56:00,673 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T18:56:00,673 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T18:56:00,674 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T18:56:00,683 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T18:56:00,684 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T18:56:00,685 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T18:56:00,697 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T18:56:00,699 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T18:56:00,708 DEBUG 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T18:56:00,711 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T18:56:00,718 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T18:56:00,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:56:00,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:56:00,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,730 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db5a5ccf5be8,45865,1733252160335, sessionid=0x1019c8d16e80000, setting cluster-up flag (Was=false) 2024-12-03T18:56:00,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,781 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T18:56:00,783 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:00,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:00,834 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T18:56:00,835 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:00,837 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T18:56:00,838 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T18:56:00,839 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T18:56:00,839 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T18:56:00,839 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db5a5ccf5be8,45865,1733252160335 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T18:56:00,840 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:56:00,840 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:56:00,840 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:56:00,840 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:56:00,840 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db5a5ccf5be8:0, corePoolSize=10, maxPoolSize=10 2024-12-03T18:56:00,840 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,841 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:56:00,841 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733252190841 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,842 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T18:56:00,842 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T18:56:00,842 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T18:56:00,843 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T18:56:00,843 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T18:56:00,843 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T18:56:00,843 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252160843,5,FailOnTimeoutGroup] 2024-12-03T18:56:00,843 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252160843,5,FailOnTimeoutGroup] 2024-12-03T18:56:00,843 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,843 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T18:56:00,843 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,843 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,843 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,844 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T18:56:00,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:56:00,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:56:00,856 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T18:56:00,856 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7 2024-12-03T18:56:00,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:56:00,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:56:00,866 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:00,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:56:00,869 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:56:00,869 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:00,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:56:00,870 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:56:00,870 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:00,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:56:00,872 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:56:00,872 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:00,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:56:00,873 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:56:00,873 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:00,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:00,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:56:00,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740 2024-12-03T18:56:00,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740 2024-12-03T18:56:00,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:56:00,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:56:00,876 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
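Annotation: the hbase:meta table descriptor dumped above (families info, ns, rep_barrier and table, with ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching and 8 KB / 64 KB block sizes) uses attributes that are also reachable through the public client builders. Below is a minimal sketch assuming the HBase 2.x+ TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API; the table name "exampletable" is a placeholder, since hbase:meta itself is bootstrapped internally by InitMetaProcedure rather than through this client path.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    // One family configured like the 'info' family in the log: 3 versions,
    // ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks, in-memory cache.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .setInMemory(true)
        .build();
    // "exampletable" is a placeholder; the real meta descriptor additionally
    // carries IS_META and the MultiRowMutationEndpoint coprocessor seen above.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("exampletable"))
        .setColumnFamily(info)
        .build();
  }
}
```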
2024-12-03T18:56:00,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:56:00,879 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:56:00,879 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729887, jitterRate=-0.07190191745758057}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:56:00,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733252160866Initializing all the Stores at 1733252160867 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252160867Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252160867Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252160867Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252160867Cleaning up temporary data from old regions at 1733252160876 (+9 ms)Region opened successfully at 1733252160880 (+4 ms) 2024-12-03T18:56:00,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:56:00,880 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:56:00,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:56:00,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:56:00,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:56:00,880 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:56:00,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252160880Disabling compacts and flushes for region at 1733252160880Disabling writes for close at 1733252160880Writing region close 
event to WAL at 1733252160880Closed at 1733252160880 2024-12-03T18:56:00,881 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:56:00,882 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T18:56:00,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T18:56:00,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:56:00,884 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T18:56:00,928 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(746): ClusterId : 3537b81e-b1c5-4891-8179-aae57e9f704b 2024-12-03T18:56:00,928 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T18:56:00,941 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T18:56:00,941 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T18:56:00,951 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T18:56:00,951 DEBUG [RS:0;db5a5ccf5be8:45949 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39251207, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:56:00,961 DEBUG [RS:0;db5a5ccf5be8:45949 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db5a5ccf5be8:45949 2024-12-03T18:56:00,961 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T18:56:00,961 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T18:56:00,961 DEBUG [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(832): About to register with Master. 
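Annotation: InitMetaProcedure above advances through named states (INIT_META_WRITE_FS_LAYOUT for the bootstrap, then INIT_META_ASSIGN_META, and later INIT_META_CREATE_NAMESPACES), spawning a TransitRegionStateProcedure subprocedure for region 1588230740 along the way. The sketch below is plain Java rather than the actual ProcedureV2 framework; it only illustrates the state progression these log lines trace.

```java
// Illustrative only: a plain-Java walk through the InitMetaProcedure states
// visible in this log, not the HBase ProcedureV2 framework itself.
public class InitMetaStatesSketch {
  enum State {
    INIT_META_WRITE_FS_LAYOUT,   // BOOTSTRAP: create the hbase:meta region on the FS
    INIT_META_ASSIGN_META,       // schedule TransitRegionStateProcedure for 1588230740
    INIT_META_CREATE_NAMESPACES, // create the 'default' and 'hbase' namespaces
    DONE
  }

  public static void main(String[] args) {
    State state = State.INIT_META_WRITE_FS_LAYOUT;
    while (state != State.DONE) {
      System.out.println("Execute pid=1, state=RUNNABLE:" + state);
      switch (state) {
        case INIT_META_WRITE_FS_LAYOUT:
          state = State.INIT_META_ASSIGN_META;
          break;
        case INIT_META_ASSIGN_META:
          state = State.INIT_META_CREATE_NAMESPACES;
          break;
        default:
          state = State.DONE;
          break;
      }
    }
  }
}
```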
2024-12-03T18:56:00,962 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(2659): reportForDuty to master=db5a5ccf5be8,45865,1733252160335 with port=45949, startcode=1733252160503 2024-12-03T18:56:00,962 DEBUG [RS:0;db5a5ccf5be8:45949 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T18:56:00,964 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37633, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T18:56:00,965 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45865 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:00,965 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45865 {}] master.ServerManager(517): Registering regionserver=db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:00,967 DEBUG [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7 2024-12-03T18:56:00,967 DEBUG [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36677 2024-12-03T18:56:00,967 DEBUG [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T18:56:00,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:56:00,979 DEBUG [RS:0;db5a5ccf5be8:45949 {}] zookeeper.ZKUtil(111): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:00,979 WARN [RS:0;db5a5ccf5be8:45949 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:56:00,979 INFO [RS:0;db5a5ccf5be8:45949 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:56:00,979 DEBUG [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:00,979 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db5a5ccf5be8,45949,1733252160503] 2024-12-03T18:56:00,982 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T18:56:00,983 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T18:56:00,984 INFO [RS:0;db5a5ccf5be8:45949 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:56:00,984 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
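Annotation: the lines above show the region server announcing itself with an ephemeral znode under /hbase/rs and the master-side RegionServerTracker reacting to the resulting NodeChildrenChanged event. A minimal standalone sketch of that pattern with the stock org.apache.zookeeper client follows; the quorum address and the /demo/rs paths are placeholders, not the znodes used by this test.

```java
import java.io.IOException;
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  public static void main(String[] args)
      throws IOException, KeeperException, InterruptedException {
    // Placeholder quorum; the watcher plays the role of ZKWatcher in the log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", path=" + event.getPath()));
    // Ensure the persistent parent path exists.
    for (String path : new String[] {"/demo", "/demo/rs"}) {
      if (zk.exists(path, false) == null) {
        zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
      }
    }
    // A server registers with an ephemeral znode; it vanishes when its session
    // dies, which is how the tracker notices a crashed server.
    zk.create("/demo/rs/host,port,startcode", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Watching the parent's children arms a NodeChildrenChanged notification.
    List<String> members = zk.getChildren("/demo/rs", true);
    System.out.println("Current members: " + members);
    zk.close();
  }
}
```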
2024-12-03T18:56:00,984 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T18:56:00,985 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T18:56:00,985 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:56:00,985 DEBUG [RS:0;db5a5ccf5be8:45949 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:56:00,986 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
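Annotation: each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above corresponds to a dedicated bounded thread pool keyed by event type. A rough equivalent with java.util.concurrent follows; the pool and task names are copied for illustration only and this is not the internal hbase ExecutorService implementation.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorSketch {
  // One bounded pool per event type, named so its threads are easy to spot in
  // stack dumps, much like the RS_OPEN_REGION / RS_CLOSE_REGION pools above.
  static ThreadPoolExecutor newPool(String name, int corePoolSize, int maxPoolSize) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        corePoolSize, maxPoolSize, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-worker"));
    pool.allowCoreThreadTimeOut(true); // let idle workers exit between bursts
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
    openRegion.execute(() -> System.out.println("open-region task running"));
    openRegion.shutdown();
  }
}
```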
2024-12-03T18:56:00,986 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,986 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,986 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,986 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:00,986 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,45949,1733252160503-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:56:01,003 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T18:56:01,003 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,45949,1733252160503-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,003 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,003 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.Replication(171): db5a5ccf5be8,45949,1733252160503 started 2024-12-03T18:56:01,015 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,015 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1482): Serving as db5a5ccf5be8,45949,1733252160503, RpcServer on db5a5ccf5be8/172.17.0.2:45949, sessionid=0x1019c8d16e80001 2024-12-03T18:56:01,015 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T18:56:01,015 DEBUG [RS:0;db5a5ccf5be8:45949 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:01,015 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,45949,1733252160503' 2024-12-03T18:56:01,015 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T18:56:01,016 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T18:56:01,016 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T18:56:01,016 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T18:56:01,016 DEBUG [RS:0;db5a5ccf5be8:45949 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:01,016 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,45949,1733252160503' 2024-12-03T18:56:01,016 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T18:56:01,016 DEBUG 
[RS:0;db5a5ccf5be8:45949 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T18:56:01,017 DEBUG [RS:0;db5a5ccf5be8:45949 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T18:56:01,017 INFO [RS:0;db5a5ccf5be8:45949 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T18:56:01,017 INFO [RS:0;db5a5ccf5be8:45949 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T18:56:01,034 WARN [db5a5ccf5be8:45865 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T18:56:01,121 INFO [RS:0;db5a5ccf5be8:45949 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C45949%2C1733252160503, suffix=, logDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503, archiveDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/oldWALs, maxLogs=32 2024-12-03T18:56:01,122 INFO [RS:0;db5a5ccf5be8:45949 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 2024-12-03T18:56:01,131 INFO [RS:0;db5a5ccf5be8:45949 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 2024-12-03T18:56:01,132 DEBUG [RS:0;db5a5ccf5be8:45949 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33109:33109),(127.0.0.1/127.0.0.1:40745:40745)] 2024-12-03T18:56:01,284 DEBUG [db5a5ccf5be8:45865 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T18:56:01,285 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:01,286 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,45949,1733252160503, state=OPENING 2024-12-03T18:56:01,297 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T18:56:01,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:01,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:01,309 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:56:01,309 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:56:01,309 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:56:01,309 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,45949,1733252160503}] 2024-12-03T18:56:01,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:01,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:01,466 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T18:56:01,471 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47019, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T18:56:01,478 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T18:56:01,478 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:56:01,481 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C45949%2C1733252160503.meta, suffix=.meta, logDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503, archiveDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/oldWALs, maxLogs=32 2024-12-03T18:56:01,481 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta 2024-12-03T18:56:01,488 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta 2024-12-03T18:56:01,489 DEBUG 
[RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40745:40745),(127.0.0.1/127.0.0.1:33109:33109)] 2024-12-03T18:56:01,491 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:56:01,491 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T18:56:01,491 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T18:56:01,491 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T18:56:01,491 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T18:56:01,491 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:01,491 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T18:56:01,492 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T18:56:01,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:56:01,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:56:01,495 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:01,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:01,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:56:01,497 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:56:01,497 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:01,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:01,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:56:01,498 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:56:01,498 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:01,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:01,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:56:01,500 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:56:01,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:01,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:01,501 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:56:01,501 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740 2024-12-03T18:56:01,502 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740 2024-12-03T18:56:01,504 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:56:01,504 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:56:01,504 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
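Annotation: the FlushLargeStoresPolicy line above states its own fallback rule: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set on the table, the lower bound becomes the region's memstore flush heap size divided by the number of families (reported here as 16.0 M). A small sketch of that arithmetic follows; the helper name and defaults are illustrative, not the HBase implementation, and the 16.0 M in the log comes from the region's computed flush heap size, which need not equal the raw configured value.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushBoundSketch {
  // Fallback described by the log line: use the explicit per-family lower bound
  // if present, otherwise memstore flush size divided by the family count.
  static long perFamilyFlushLowerBound(long configuredLowerBound,
                                       long memstoreFlushSize,
                                       int numFamilies) {
    return configuredLowerBound > 0
        ? configuredLowerBound
        : memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key taken from the log line; -1 stands in for "not set" in this sketch.
    long lowerBound = conf.getLong(
        "hbase.hregion.percolumnfamilyflush.size.lower.bound", -1L);
    long flushSize = conf.getLong(
        "hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // hbase:meta has four families in this log: info, ns, rep_barrier, table.
    System.out.println(perFamilyFlushLowerBound(lowerBound, flushSize, 4));
  }
}
```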
2024-12-03T18:56:01,506 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:56:01,506 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882646, jitterRate=0.12234334647655487}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:56:01,506 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T18:56:01,507 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733252161492Writing region info on filesystem at 1733252161492Initializing all the Stores at 1733252161493 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252161493Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252161493Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252161493Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252161493Cleaning up temporary data from old regions at 1733252161504 (+11 ms)Running coprocessor post-open hooks at 1733252161506 (+2 ms)Region opened successfully at 1733252161507 (+1 ms) 2024-12-03T18:56:01,508 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733252161465 2024-12-03T18:56:01,510 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T18:56:01,510 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T18:56:01,511 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:01,512 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,45949,1733252160503, state=OPEN 2024-12-03T18:56:01,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:56:01,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:56:01,587 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:01,587 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:56:01,587 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:56:01,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T18:56:01,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,45949,1733252160503 in 278 msec 2024-12-03T18:56:01,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T18:56:01,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 712 msec 2024-12-03T18:56:01,600 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:56:01,600 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T18:56:01,601 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:56:01,602 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,45949,1733252160503, seqNum=-1] 2024-12-03T18:56:01,602 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:56:01,603 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40927, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:56:01,609 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 770 msec 2024-12-03T18:56:01,610 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733252161609, completionTime=-1 2024-12-03T18:56:01,610 INFO 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T18:56:01,610 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T18:56:01,612 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T18:56:01,612 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733252221612 2024-12-03T18:56:01,612 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733252281612 2024-12-03T18:56:01,612 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T18:56:01,612 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,45865,1733252160335-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,612 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,45865,1733252160335-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,613 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,45865,1733252160335-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,613 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db5a5ccf5be8:45865, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,613 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,613 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,615 DEBUG [master/db5a5ccf5be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T18:56:01,618 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.049sec 2024-12-03T18:56:01,618 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T18:56:01,618 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T18:56:01,618 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T18:56:01,618 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-03T18:56:01,618 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T18:56:01,618 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,45865,1733252160335-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:56:01,618 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,45865,1733252160335-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T18:56:01,621 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T18:56:01,622 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T18:56:01,622 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,45865,1733252160335-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:01,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d428c33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:56:01,629 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db5a5ccf5be8,45865,-1 for getting cluster id 2024-12-03T18:56:01,629 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T18:56:01,632 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3537b81e-b1c5-4891-8179-aae57e9f704b' 2024-12-03T18:56:01,633 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T18:56:01,633 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3537b81e-b1c5-4891-8179-aae57e9f704b" 2024-12-03T18:56:01,633 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3153ac40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:56:01,633 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db5a5ccf5be8,45865,-1] 2024-12-03T18:56:01,634 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T18:56:01,634 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:56:01,636 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40196, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T18:56:01,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cbeb460, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:56:01,638 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:56:01,639 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,45949,1733252160503, seqNum=-1] 2024-12-03T18:56:01,640 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:56:01,641 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53668, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:56:01,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:01,644 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:01,647 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T18:56:01,647 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-03T18:56:01,648 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-03T18:56:01,648 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T18:56:01,649 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:01,649 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@37fdcc12 2024-12-03T18:56:01,649 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T18:56:01,650 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40212, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T18:56:01,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T18:56:01,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-03T18:56:01,651 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:56:01,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T18:56:01,653 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T18:56:01,653 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:01,653 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-03T18:56:01,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:56:01,654 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T18:56:01,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741835_1011 (size=395) 2024-12-03T18:56:01,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741835_1011 (size=395) 2024-12-03T18:56:02,064 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7e0175e16eb7a4a91438a495c1caf9ae, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7 2024-12-03T18:56:02,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46217 is added to blk_1073741836_1012 (size=78) 2024-12-03T18:56:02,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38945 is added to blk_1073741836_1012 (size=78) 2024-12-03T18:56:02,072 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:02,072 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 7e0175e16eb7a4a91438a495c1caf9ae, disabling compactions & flushes 2024-12-03T18:56:02,072 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:02,072 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:02,073 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. after waiting 1 ms 2024-12-03T18:56:02,073 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:02,073 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:02,073 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7e0175e16eb7a4a91438a495c1caf9ae: Waiting for close lock at 1733252162072Disabling compacts and flushes for region at 1733252162072Disabling writes for close at 1733252162073 (+1 ms)Writing region close event to WAL at 1733252162073Closed at 1733252162073 2024-12-03T18:56:02,074 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T18:56:02,074 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733252162074"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733252162074"}]},"ts":"1733252162074"} 2024-12-03T18:56:02,076 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T18:56:02,077 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T18:56:02,078 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252162077"}]},"ts":"1733252162077"} 2024-12-03T18:56:02,080 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-03T18:56:02,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0175e16eb7a4a91438a495c1caf9ae, ASSIGN}] 2024-12-03T18:56:02,081 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0175e16eb7a4a91438a495c1caf9ae, ASSIGN 2024-12-03T18:56:02,082 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0175e16eb7a4a91438a495c1caf9ae, ASSIGN; state=OFFLINE, location=db5a5ccf5be8,45949,1733252160503; forceNewPlan=false, retain=false 2024-12-03T18:56:02,233 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7e0175e16eb7a4a91438a495c1caf9ae, regionState=OPENING, regionLocation=db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:02,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0175e16eb7a4a91438a495c1caf9ae, ASSIGN because future has completed 2024-12-03T18:56:02,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7e0175e16eb7a4a91438a495c1caf9ae, server=db5a5ccf5be8,45949,1733252160503}] 2024-12-03T18:56:02,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:02,405 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 
2024-12-03T18:56:02,405 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7e0175e16eb7a4a91438a495c1caf9ae, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:56:02,405 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,406 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:02,406 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,406 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,408 INFO [StoreOpener-7e0175e16eb7a4a91438a495c1caf9ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,409 INFO [StoreOpener-7e0175e16eb7a4a91438a495c1caf9ae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e0175e16eb7a4a91438a495c1caf9ae columnFamilyName info 2024-12-03T18:56:02,410 DEBUG [StoreOpener-7e0175e16eb7a4a91438a495c1caf9ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:02,410 INFO [StoreOpener-7e0175e16eb7a4a91438a495c1caf9ae-1 {}] regionserver.HStore(327): Store=7e0175e16eb7a4a91438a495c1caf9ae/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:56:02,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:02,410 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,411 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,412 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,413 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,413 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,415 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,419 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:56:02,419 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7e0175e16eb7a4a91438a495c1caf9ae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847146, jitterRate=0.07720279693603516}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T18:56:02,420 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:02,420 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7e0175e16eb7a4a91438a495c1caf9ae: Running coprocessor pre-open hook at 1733252162406Writing region info on filesystem at 1733252162406Initializing all the Stores at 1733252162407 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252162407Cleaning up temporary data from old regions at 1733252162413 (+6 ms)Running coprocessor post-open hooks at 1733252162420 (+7 ms)Region opened successfully at 1733252162420 2024-12-03T18:56:02,422 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae., pid=6, masterSystemTime=1733252162395 2024-12-03T18:56:02,424 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:02,424 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:02,425 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7e0175e16eb7a4a91438a495c1caf9ae, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:02,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7e0175e16eb7a4a91438a495c1caf9ae, server=db5a5ccf5be8,45949,1733252160503 because future has completed 2024-12-03T18:56:02,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T18:56:02,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7e0175e16eb7a4a91438a495c1caf9ae, server=db5a5ccf5be8,45949,1733252160503 in 190 msec 2024-12-03T18:56:02,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T18:56:02,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0175e16eb7a4a91438a495c1caf9ae, ASSIGN in 352 msec 2024-12-03T18:56:02,436 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T18:56:02,436 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252162436"}]},"ts":"1733252162436"} 2024-12-03T18:56:02,438 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-03T18:56:02,439 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T18:56:02,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 788 msec 2024-12-03T18:56:03,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:03,381 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:56:03,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:03,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:04,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:04,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:05,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:05,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:06,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:06,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:06,983 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T18:56:06,984 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-03T18:56:07,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:07,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:07,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T18:56:07,876 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T18:56:07,878 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T18:56:07,878 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-03T18:56:07,879 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:56:07,879 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T18:56:07,880 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T18:56:07,880 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T18:56:08,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:08,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:09,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:09,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:10,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:10,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:11,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:11,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45865 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:56:11,751 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-03T18:56:11,751 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-03T18:56:11,759 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T18:56:11,759 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:11,763 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae., hostname=db5a5ccf5be8,45949,1733252160503, seqNum=2] 2024-12-03T18:56:12,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:12,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:13,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:13,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:13,766 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 2024-12-03T18:56:13,767 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:38945,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:13,767 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:13,767 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:38945,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:13,768 WARN [DataStreamer for file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 block BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK], DatanodeInfoWithStorage[127.0.0.1:38945,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38945,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]) is bad. 
2024-12-03T18:56:13,768 WARN [DataStreamer for file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 block BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38945,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK], DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38945,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]) is bad. 2024-12-03T18:56:13,768 WARN [PacketResponder: BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38945] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,769 WARN [DataStreamer for file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta block BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK], DatanodeInfoWithStorage[127.0.0.1:38945,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38945,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]) is bad. 2024-12-03T18:56:13,768 WARN [PacketResponder: BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38945] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,769 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:33688 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33688 dst: /127.0.0.1:38945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,769 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:47828 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47828 dst: /127.0.0.1:46217 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,769 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:47834 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47834 dst: /127.0.0.1:46217 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:33692 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33692 dst: /127.0.0.1:38945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1337877415_22 at /127.0.0.1:47810 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47810 dst: /127.0.0.1:46217 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,771 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1337877415_22 at /127.0.0.1:33654 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33654 dst: /127.0.0.1:38945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,858 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63008d08{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:13,858 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a20a16b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:56:13,858 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:56:13,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d751fec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:56:13,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718cd5f1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,STOPPED} 2024-12-03T18:56:13,862 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:56:13,862 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:56:13,862 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677972484-172.17.0.2-1733252158340 (Datanode Uuid 72a14fa2-1e94-48af-8932-1d0a79709813) service to localhost/127.0.0.1:36677 2024-12-03T18:56:13,862 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:56:13,863 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data3/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:13,863 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data4/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:13,864 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:56:13,871 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:56:13,875 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:56:13,875 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:56:13,875 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:56:13,876 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:56:13,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75f58649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:56:13,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@571de0fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:56:13,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45604664{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/java.io.tmpdir/jetty-localhost-37397-hadoop-hdfs-3_4_1-tests_jar-_-any-14469419996078995192/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:13,979 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e793ffb{HTTP/1.1, 
(http/1.1)}{localhost:37397} 2024-12-03T18:56:13,979 INFO [Time-limited test {}] server.Server(415): Started @171855ms 2024-12-03T18:56:13,980 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:56:13,997 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:13,997 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:13,997 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:13,997 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1337877415_22 at /127.0.0.1:39026 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39026 dst: /127.0.0.1:46217 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:39000 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39000 dst: /127.0.0.1:46217 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:13,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:38998 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38998 dst: /127.0.0.1:46217 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:56:13,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@479334cf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:13,999 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d6935b5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:56:13,999 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:56:14,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a724f0c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:56:14,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719d6bc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,STOPPED} 2024-12-03T18:56:14,001 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T18:56:14,001 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:56:14,001 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:56:14,001 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677972484-172.17.0.2-1733252158340 (Datanode Uuid bd6c022d-b2da-4b7e-9001-0af9fe78716f) service to localhost/127.0.0.1:36677 2024-12-03T18:56:14,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data1/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:14,002 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data2/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:14,002 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:56:14,008 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:56:14,011 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:56:14,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:56:14,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:56:14,011 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:56:14,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ae00177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:56:14,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49f94f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:56:14,106 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aaed393{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/java.io.tmpdir/jetty-localhost-36611-hadoop-hdfs-3_4_1-tests_jar-_-any-7294700382203264576/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:14,106 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7939cb3e{HTTP/1.1, (http/1.1)}{localhost:36611} 2024-12-03T18:56:14,106 INFO [Time-limited test {}] server.Server(415): Started @171983ms 2024-12-03T18:56:14,108 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:56:14,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:14,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:14,460 WARN [Thread-1354 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:56:14,463 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd12a99cd798c7e30 with lease ID 0x841cd9ab2fcf601a: from storage DS-0094e978-a7b9-4c29-8fe8-d98674bcab81 node DatanodeRegistration(127.0.0.1:33849, datanodeUuid=72a14fa2-1e94-48af-8932-1d0a79709813, infoPort=35549, infoSecurePort=0, ipcPort=33217, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:14,463 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd12a99cd798c7e30 with lease ID 0x841cd9ab2fcf601a: from storage DS-cf86c6ae-8be6-4d8f-bb27-7299f0ef92e9 node DatanodeRegistration(127.0.0.1:33849, datanodeUuid=72a14fa2-1e94-48af-8932-1d0a79709813, infoPort=35549, infoSecurePort=0, ipcPort=33217, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:14,668 WARN [Thread-1374 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:56:14,670 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd217faf111508558 with lease ID 0x841cd9ab2fcf601b: from storage DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2 node DatanodeRegistration(127.0.0.1:42439, datanodeUuid=bd6c022d-b2da-4b7e-9001-0af9fe78716f, infoPort=43593, infoSecurePort=0, ipcPort=35493, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:14,670 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd217faf111508558 with lease ID 0x841cd9ab2fcf601b: from storage DS-d1305e60-28fd-493e-8e66-c5ee7a027671 node DatanodeRegistration(127.0.0.1:42439, datanodeUuid=bd6c022d-b2da-4b7e-9001-0af9fe78716f, infoPort=43593, infoSecurePort=0, ipcPort=35493, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T18:56:15,127 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-03T18:56:15,132 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-03T18:56:15,135 ERROR [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7-prefix:db5a5ccf5be8,45949,1733252160503 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:15,135 WARN [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7-prefix:db5a5ccf5be8,45949,1733252160503 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:15,135 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C45949%2C1733252160503:(num 1733252161122) roll requested 2024-12-03T18:56:15,136 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 2024-12-03T18:56:15,142 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 newFile=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 2024-12-03T18:56:15,142 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:15,143 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:15,143 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:15,143 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:15,143 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:15,143 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 2024-12-03T18:56:15,144 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:15,144 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:15,144 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 2024-12-03T18:56:15,144 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35549:35549),(127.0.0.1/127.0.0.1:43593:43593)] 2024-12-03T18:56:15,144 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 is not closed yet, will try archiving it next time 2024-12-03T18:56:15,144 WARN [IPC Server handler 3 on default port 36677 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-12-03T18:56:15,145 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 after 1ms 2024-12-03T18:56:15,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:15,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:16,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:16,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:17,149 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-03T18:56:17,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:17,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:17,465 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T18:56:18,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:18,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:19,146 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 after 4002ms 2024-12-03T18:56:19,155 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:42439,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:19,156 WARN [DataStreamer for file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 block BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33849,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK], DatanodeInfoWithStorage[127.0.0.1:42439,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42439,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]) is bad. 
2024-12-03T18:56:19,156 WARN [PacketResponder: BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42439] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:19,157 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:51740 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33849:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51740 dst: /127.0.0.1:33849 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:56:19,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:48346 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42439:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48346 dst: /127.0.0.1:42439 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:56:19,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aaed393{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:19,194 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7939cb3e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:56:19,194 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:56:19,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49f94f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:56:19,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ae00177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,STOPPED} 2024-12-03T18:56:19,196 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T18:56:19,196 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:56:19,196 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:56:19,196 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677972484-172.17.0.2-1733252158340 (Datanode Uuid bd6c022d-b2da-4b7e-9001-0af9fe78716f) service to localhost/127.0.0.1:36677 2024-12-03T18:56:19,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data1/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:19,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data2/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:19,197 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:56:19,206 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:56:19,208 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:56:19,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:56:19,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:56:19,211 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:56:19,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7790ff99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:56:19,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a0cdfff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:56:19,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1cb32f68{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/java.io.tmpdir/jetty-localhost-46645-hadoop-hdfs-3_4_1-tests_jar-_-any-11662838579148719923/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:19,326 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a15b38f{HTTP/1.1, (http/1.1)}{localhost:46645} 2024-12-03T18:56:19,327 INFO [Time-limited test {}] server.Server(415): Started @177203ms 2024-12-03T18:56:19,328 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:56:19,345 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:19,346 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-316798553_22 at /127.0.0.1:51758 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33849:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51758 dst: /127.0.0.1:33849 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T18:56:19,349 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45604664{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:19,349 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e793ffb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:56:19,349 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:56:19,349 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@571de0fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:56:19,349 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75f58649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,STOPPED} 2024-12-03T18:56:19,350 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:56:19,350 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677972484-172.17.0.2-1733252158340 (Datanode Uuid 72a14fa2-1e94-48af-8932-1d0a79709813) service to localhost/127.0.0.1:36677 2024-12-03T18:56:19,350 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T18:56:19,350 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:56:19,351 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data3/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:19,351 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data4/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:19,351 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:56:19,366 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:56:19,369 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:56:19,370 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:56:19,370 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:56:19,370 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:56:19,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10215f32{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:56:19,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ffcd24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:56:19,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-12-03T18:56:19,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:19,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4fc14d61{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/java.io.tmpdir/jetty-localhost-36859-hadoop-hdfs-3_4_1-tests_jar-_-any-17022798745789112131/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:19,461 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66269315{HTTP/1.1, (http/1.1)}{localhost:36859} 2024-12-03T18:56:19,461 INFO [Time-limited test {}] server.Server(415): Started @177338ms 2024-12-03T18:56:19,463 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:56:20,006 WARN [Thread-1428 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:56:20,008 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x426098e8b28f8afb with lease ID 0x841cd9ab2fcf601c: from storage DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2 node DatanodeRegistration(127.0.0.1:36037, datanodeUuid=bd6c022d-b2da-4b7e-9001-0af9fe78716f, infoPort=41731, infoSecurePort=0, ipcPort=44403, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:20,008 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x426098e8b28f8afb with lease ID 0x841cd9ab2fcf601c: from storage DS-d1305e60-28fd-493e-8e66-c5ee7a027671 node DatanodeRegistration(127.0.0.1:36037, datanodeUuid=bd6c022d-b2da-4b7e-9001-0af9fe78716f, infoPort=41731, infoSecurePort=0, ipcPort=44403, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:20,200 WARN [Thread-1448 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:56:20,203 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8535ffb9df2187ef with lease ID 0x841cd9ab2fcf601d: from storage DS-0094e978-a7b9-4c29-8fe8-d98674bcab81 node DatanodeRegistration(127.0.0.1:46789, datanodeUuid=72a14fa2-1e94-48af-8932-1d0a79709813, infoPort=41023, infoSecurePort=0, ipcPort=37979, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:20,203 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8535ffb9df2187ef with lease ID 0x841cd9ab2fcf601d: from storage DS-cf86c6ae-8be6-4d8f-bb27-7299f0ef92e9 node DatanodeRegistration(127.0.0.1:46789, datanodeUuid=72a14fa2-1e94-48af-8932-1d0a79709813, infoPort=41023, infoSecurePort=0, ipcPort=37979, storageInfo=lv=-57;cid=testClusterID;nsid=848947567;c=1733252158340), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:20,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:20,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:20,484 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-03T18:56:20,486 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-03T18:56:20,487 ERROR [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7-prefix:db5a5ccf5be8,45949,1733252160503 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33849,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:20,487 WARN [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7-prefix:db5a5ccf5be8,45949,1733252160503 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33849,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:20,487 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C45949%2C1733252160503:(num 1733252175135) roll requested 2024-12-03T18:56:20,488 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 2024-12-03T18:56:20,500 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 newFile=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 2024-12-03T18:56:20,500 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:20,500 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:20,500 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:20,500 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:20,500 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:20,501 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 2024-12-03T18:56:20,501 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33849,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:20,501 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33849,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:20,501 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 2024-12-03T18:56:20,501 WARN [IPC Server handler 2 on default port 36677 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-03T18:56:20,501 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 after 0ms 2024-12-03T18:56:20,511 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41023:41023),(127.0.0.1/127.0.0.1:41731:41731)] 2024-12-03T18:56:20,511 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 is not closed yet, will try archiving it next time 2024-12-03T18:56:21,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:21,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:22,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:22,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:22,513 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:22,522 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 newFile=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:22,523 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:22,523 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:22,524 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:22,524 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:22,524 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:22,524 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:22,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41731:41731),(127.0.0.1/127.0.0.1:41023:41023)] 2024-12-03T18:56:22,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 is not closed yet, will try archiving it next time 2024-12-03T18:56:22,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 is not closed yet, will try archiving it next time 2024-12-03T18:56:22,528 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 2024-12-03T18:56:22,528 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 2024-12-03T18:56:22,529 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 after 1ms 2024-12-03T18:56:22,529 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 2024-12-03T18:56:22,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741838_1019 (size=1264) 2024-12-03T18:56:22,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741838_1019 (size=1264) 2024-12-03T18:56:22,545 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733252162421/Put/vlen=218/seqid=0] 2024-12-03T18:56:22,545 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733252171765/Put/vlen=1045/seqid=0] 2024-12-03T18:56:22,545 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252161122 2024-12-03T18:56:22,546 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 2024-12-03T18:56:22,546 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 2024-12-03T18:56:22,546 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 after 0ms 2024-12-03T18:56:22,546 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 2024-12-03T18:56:22,551 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733252175134/Put/vlen=1045/seqid=0] 2024-12-03T18:56:22,551 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733252177151/Put/vlen=1045/seqid=0] 2024-12-03T18:56:22,552 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 2024-12-03T18:56:22,552 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 2024-12-03T18:56:22,552 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 2024-12-03T18:56:22,552 WARN [IPC Server handler 0 on default port 
36677 {}] namenode.FSNamesystem(3730): BLOCK* internalReleaseLease: All existing blocks are COMPLETE, lease removed, file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 closed. 2024-12-03T18:56:22,552 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 after 0ms 2024-12-03T18:56:22,553 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252180487 2024-12-03T18:56:22,557 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733252180487/Put/vlen=1045/seqid=0] 2024-12-03T18:56:22,557 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:22,557 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:22,561 WARN [IPC Server handler 1 on default port 36677 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-03T18:56:22,561 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 after 4ms 2024-12-03T18:56:22,931 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 is not closed yet, will try archiving it next time 2024-12-03T18:56:23,009 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T18:56:23,206 WARN [ResponseProcessor for block BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:23,206 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1337877415_22 at /127.0.0.1:45206 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45206 dst: /127.0.0.1:36037 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36037 remote=/127.0.0.1:45206]. Total timeout mills is 60000, 59317 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:23,206 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1337877415_22 at /127.0.0.1:52814 [Receiving block BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46789:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52814 dst: /127.0.0.1:46789 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:23,206 WARN [DataStreamer for file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 block BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36037,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK], DatanodeInfoWithStorage[127.0.0.1:46789,DS-0094e978-a7b9-4c29-8fe8-d98674bcab81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36037,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]) is bad. 
2024-12-03T18:56:23,207 WARN [DataStreamer for file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 block BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:23,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741839_1022 (size=85) 2024-12-03T18:56:23,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:23,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:24,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:24,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:24,503 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252175135 after 4002ms 2024-12-03T18:56:25,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:25,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:26,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:26,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:26,562 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 after 4005ms 2024-12-03T18:56:26,562 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:26,565 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:26,566 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7e0175e16eb7a4a91438a495c1caf9ae 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-03T18:56:26,566 ERROR [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7-prefix:db5a5ccf5be8,45949,1733252160503 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:26,567 WARN [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7-prefix:db5a5ccf5be8,45949,1733252160503 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:26,567 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C45949%2C1733252160503:(num 1733252182513) roll requested 2024-12-03T18:56:26,567 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45949%2C1733252160503.1733252186567 2024-12-03T18:56:26,572 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 newFile=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252186567 2024-12-03T18:56:26,573 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,573 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,573 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,573 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,573 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,573 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252186567 2024-12-03T18:56:26,573 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:26,574 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41023:41023),(127.0.0.1/127.0.0.1:41731:41731)] 2024-12-03T18:56:26,574 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 is not closed yet, will try archiving it next time 2024-12-03T18:56:26,574 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1677972484-172.17.0.2-1733252158340:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:26,574 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:26,575 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 after 1ms 2024-12-03T18:56:26,576 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 to hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/oldWALs/db5a5ccf5be8%2C45949%2C1733252160503.1733252182513 2024-12-03T18:56:26,595 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae/.tmp/info/08deb5da396f48ff8dfefe21efa47f57 is 1080, key is row1002/info:/1733252171765/Put/seqid=0 2024-12-03T18:56:26,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741841_1024 (size=9270) 2024-12-03T18:56:26,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741841_1024 (size=9270) 2024-12-03T18:56:26,601 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae/.tmp/info/08deb5da396f48ff8dfefe21efa47f57 2024-12-03T18:56:26,618 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae/.tmp/info/08deb5da396f48ff8dfefe21efa47f57 as hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae/info/08deb5da396f48ff8dfefe21efa47f57 2024-12-03T18:56:26,623 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae/info/08deb5da396f48ff8dfefe21efa47f57, entries=4, sequenceid=8, filesize=9.1 K 2024-12-03T18:56:26,625 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 7e0175e16eb7a4a91438a495c1caf9ae in 58ms, sequenceid=8, compaction requested=false 2024-12-03T18:56:26,625 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7e0175e16eb7a4a91438a495c1caf9ae: 2024-12-03T18:56:26,625 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-03T18:56:26,625 ERROR 
[FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7-prefix:db5a5ccf5be8,45949,1733252160503.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:26,625 WARN [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7-prefix:db5a5ccf5be8,45949,1733252160503.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:26,625 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C45949%2C1733252160503.meta:.meta(num 1733252161481) roll requested 2024-12-03T18:56:26,626 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252186626.meta 2024-12-03T18:56:26,637 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,637 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,637 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,637 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,638 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:26,638 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252186626.meta 2024-12-03T18:56:26,638 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:26,638 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:26,638 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta 2024-12-03T18:56:26,639 WARN [IPC Server handler 2 on default port 36677 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1014 2024-12-03T18:56:26,639 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta after 1ms 2024-12-03T18:56:26,640 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41023:41023),(127.0.0.1/127.0.0.1:41731:41731)] 2024-12-03T18:56:26,640 DEBUG [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta is not closed yet, will try archiving it next time 2024-12-03T18:56:26,656 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/info/b67fd96f1e704372a1bdf40be9eeda99 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae./info:regioninfo/1733252162425/Put/seqid=0 2024-12-03T18:56:26,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741843_1027 (size=7125) 2024-12-03T18:56:26,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741843_1027 (size=7125) 2024-12-03T18:56:26,667 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/info/b67fd96f1e704372a1bdf40be9eeda99 2024-12-03T18:56:26,687 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/ns/ce29826316b4490fab626a7b576fd033 is 43, key is default/ns:d/1733252161604/Put/seqid=0 2024-12-03T18:56:26,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741844_1028 (size=5153) 2024-12-03T18:56:26,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741844_1028 (size=5153) 2024-12-03T18:56:26,696 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/ns/ce29826316b4490fab626a7b576fd033 2024-12-03T18:56:26,714 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/table/adf3fe67355d4fc0a2c7bd7d5f4fdebf is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733252162436/Put/seqid=0 2024-12-03T18:56:26,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741845_1029 (size=5438) 2024-12-03T18:56:26,720 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741845_1029 (size=5438) 2024-12-03T18:56:26,720 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/table/adf3fe67355d4fc0a2c7bd7d5f4fdebf 2024-12-03T18:56:26,728 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/info/b67fd96f1e704372a1bdf40be9eeda99 as hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/info/b67fd96f1e704372a1bdf40be9eeda99 2024-12-03T18:56:26,737 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/info/b67fd96f1e704372a1bdf40be9eeda99, entries=10, sequenceid=11, filesize=7.0 K 2024-12-03T18:56:26,738 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/ns/ce29826316b4490fab626a7b576fd033 as hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/ns/ce29826316b4490fab626a7b576fd033 2024-12-03T18:56:26,745 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/ns/ce29826316b4490fab626a7b576fd033, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T18:56:26,746 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/.tmp/table/adf3fe67355d4fc0a2c7bd7d5f4fdebf as hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/table/adf3fe67355d4fc0a2c7bd7d5f4fdebf 2024-12-03T18:56:26,753 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/table/adf3fe67355d4fc0a2c7bd7d5f4fdebf, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T18:56:26,755 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false 2024-12-03T18:56:26,755 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T18:56:26,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T18:56:26,762 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T18:56:26,762 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:56:26,763 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:56:26,763 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:56:26,763 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T18:56:26,763 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T18:56:26,763 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1634300279, stopped=false 2024-12-03T18:56:26,763 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db5a5ccf5be8,45865,1733252160335 2024-12-03T18:56:26,833 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:56:26,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:56:26,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:56:26,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:26,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:26,833 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T18:56:26,833 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:56:26,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:56:26,833 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db5a5ccf5be8,45949,1733252160503' ***** 2024-12-03T18:56:26,833 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T18:56:26,834 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T18:56:26,834 INFO [RS:0;db5a5ccf5be8:45949 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T18:56:26,834 INFO [RS:0;db5a5ccf5be8:45949 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T18:56:26,834 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(3091): Received CLOSE for 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:26,834 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T18:56:26,834 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(959): stopping server db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:26,834 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:56:26,834 INFO [RS:0;db5a5ccf5be8:45949 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db5a5ccf5be8:45949. 
2024-12-03T18:56:26,834 DEBUG [RS:0;db5a5ccf5be8:45949 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:56:26,835 DEBUG [RS:0;db5a5ccf5be8:45949 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:56:26,835 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7e0175e16eb7a4a91438a495c1caf9ae, disabling compactions & flushes 2024-12-03T18:56:26,835 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:56:26,835 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T18:56:26,835 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T18:56:26,835 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:26,835 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T18:56:26,835 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:26,835 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T18:56:26,835 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. after waiting 0 ms 2024-12-03T18:56:26,835 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 
2024-12-03T18:56:26,835 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T18:56:26,835 DEBUG [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1325): Online Regions={7e0175e16eb7a4a91438a495c1caf9ae=TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae., 1588230740=hbase:meta,,1.1588230740} 2024-12-03T18:56:26,835 DEBUG [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7e0175e16eb7a4a91438a495c1caf9ae 2024-12-03T18:56:26,835 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:56:26,836 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:56:26,836 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:56:26,836 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:56:26,836 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:56:26,836 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:56:26,840 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0175e16eb7a4a91438a495c1caf9ae/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-03T18:56:26,840 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T18:56:26,840 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 2024-12-03T18:56:26,841 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7e0175e16eb7a4a91438a495c1caf9ae: Waiting for close lock at 1733252186834Running coprocessor pre-close hooks at 1733252186834Disabling compacts and flushes for region at 1733252186834Disabling writes for close at 1733252186835 (+1 ms)Writing region close event to WAL at 1733252186836 (+1 ms)Running coprocessor post-close hooks at 1733252186840 (+4 ms)Closed at 1733252186840 2024-12-03T18:56:26,841 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733252161650.7e0175e16eb7a4a91438a495c1caf9ae. 
2024-12-03T18:56:26,841 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:56:26,841 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:56:26,841 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252186836Running coprocessor pre-close hooks at 1733252186836Disabling compacts and flushes for region at 1733252186836Disabling writes for close at 1733252186836Writing region close event to WAL at 1733252186837 (+1 ms)Running coprocessor post-close hooks at 1733252186841 (+4 ms)Closed at 1733252186841 2024-12-03T18:56:26,841 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T18:56:26,993 INFO [regionserver/db5a5ccf5be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:56:27,035 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(976): stopping server db5a5ccf5be8,45949,1733252160503; all regions closed. 2024-12-03T18:56:27,036 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T18:56:27,036 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T18:56:27,036 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:27,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:27,036 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:27,036 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:27,036 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:27,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741842_1025 (size=825) 2024-12-03T18:56:27,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741842_1025 (size=825) 2024-12-03T18:56:27,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:27,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:27,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:56:27,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T18:56:27,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T18:56:28,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:28,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:29,202 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-03T18:56:29,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:29,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:30,313 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T18:56:30,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:30,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:30,640 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta after 4002ms 2024-12-03T18:56:30,640 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/WALs/db5a5ccf5be8,45949,1733252160503/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta to hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/oldWALs/db5a5ccf5be8%2C45949%2C1733252160503.meta.1733252161481.meta 2024-12-03T18:56:30,643 DEBUG [RS:0;db5a5ccf5be8:45949 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/oldWALs 2024-12-03T18:56:30,643 INFO [RS:0;db5a5ccf5be8:45949 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C45949%2C1733252160503.meta:.meta(num 1733252186626) 2024-12-03T18:56:30,643 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:30,643 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:30,643 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:30,643 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:30,644 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741840_1023 (size=1162) 2024-12-03T18:56:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741840_1023 (size=1162) 2024-12-03T18:56:30,649 DEBUG [RS:0;db5a5ccf5be8:45949 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/oldWALs 2024-12-03T18:56:30,649 INFO [RS:0;db5a5ccf5be8:45949 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C45949%2C1733252160503:(num 1733252186567) 2024-12-03T18:56:30,649 DEBUG [RS:0;db5a5ccf5be8:45949 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:56:30,649 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:56:30,649 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:56:30,649 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.ChoreService(370): Chore service for: regionserver/db5a5ccf5be8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T18:56:30,650 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:56:30,650 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T18:56:30,650 INFO [RS:0;db5a5ccf5be8:45949 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45949 2024-12-03T18:56:30,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:56:30,695 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:56:30,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db5a5ccf5be8,45949,1733252160503 2024-12-03T18:56:30,706 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db5a5ccf5be8,45949,1733252160503] 2024-12-03T18:56:30,716 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db5a5ccf5be8,45949,1733252160503 already deleted, retry=false 2024-12-03T18:56:30,716 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db5a5ccf5be8,45949,1733252160503 expired; onlineServers=0 2024-12-03T18:56:30,716 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db5a5ccf5be8,45865,1733252160335' ***** 2024-12-03T18:56:30,716 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T18:56:30,716 INFO [M:0;db5a5ccf5be8:45865 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:56:30,716 INFO [M:0;db5a5ccf5be8:45865 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:56:30,716 DEBUG [M:0;db5a5ccf5be8:45865 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T18:56:30,716 DEBUG [M:0;db5a5ccf5be8:45865 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T18:56:30,716 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252160843 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252160843,5,FailOnTimeoutGroup] 2024-12-03T18:56:30,717 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252160843 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252160843,5,FailOnTimeoutGroup] 2024-12-03T18:56:30,717 INFO [M:0;db5a5ccf5be8:45865 {}] hbase.ChoreService(370): Chore service for: master/db5a5ccf5be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T18:56:30,717 INFO [M:0;db5a5ccf5be8:45865 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:56:30,717 DEBUG [M:0;db5a5ccf5be8:45865 {}] master.HMaster(1795): Stopping service threads 2024-12-03T18:56:30,717 INFO [M:0;db5a5ccf5be8:45865 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T18:56:30,717 INFO [M:0;db5a5ccf5be8:45865 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:56:30,717 INFO [M:0;db5a5ccf5be8:45865 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T18:56:30,717 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-03T18:56:30,717 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-03T18:56:30,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T18:56:30,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:30,737 DEBUG [M:0;db5a5ccf5be8:45865 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-03T18:56:30,737 DEBUG [M:0;db5a5ccf5be8:45865 {}] master.ActiveMasterManager(353): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-03T18:56:30,738 INFO [M:0;db5a5ccf5be8:45865 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/.lastflushedseqids 2024-12-03T18:56:30,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741846_1030 (size=111) 2024-12-03T18:56:30,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741846_1030 (size=111) 2024-12-03T18:56:30,806 INFO [RS:0;db5a5ccf5be8:45949 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:56:30,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:56:30,806 INFO [RS:0;db5a5ccf5be8:45949 {}] regionserver.HRegionServer(1031): Exiting; stopping=db5a5ccf5be8,45949,1733252160503; zookeeper connection closed. 2024-12-03T18:56:30,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45949-0x1019c8d16e80001, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:56:30,806 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7154d2c6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7154d2c6 2024-12-03T18:56:30,807 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T18:56:31,147 INFO [M:0;db5a5ccf5be8:45865 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T18:56:31,147 INFO [M:0;db5a5ccf5be8:45865 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T18:56:31,147 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:56:31,147 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T18:56:31,147 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:31,147 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:56:31,147 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:31,148 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-03T18:56:31,148 ERROR [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData-prefix:db5a5ccf5be8,45865,1733252160335 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:31,148 WARN [FSHLog-0-hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData-prefix:db5a5ccf5be8,45865,1733252160335 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T18:56:31,148 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog db5a5ccf5be8%2C45865%2C1733252160335:(num 1733252160629) roll requested 2024-12-03T18:56:31,148 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C45865%2C1733252160335.1733252191148 2024-12-03T18:56:31,153 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,153 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,153 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,153 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,153 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,153 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252191148 2024-12-03T18:56:31,153 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:31,154 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46217,DS-63a5d467-e8c8-4238-ba88-56526b3f9bf2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T18:56:31,154 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 2024-12-03T18:56:31,154 WARN [IPC Server handler 3 on default port 36677 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-03T18:56:31,154 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 after 0ms 2024-12-03T18:56:31,156 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41731:41731),(127.0.0.1/127.0.0.1:41023:41023)] 2024-12-03T18:56:31,157 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 is not closed yet, will try archiving it next time 2024-12-03T18:56:31,175 DEBUG [M:0;db5a5ccf5be8:45865 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6701b0c930254fc09e35f85aebbf33e5 is 82, key is hbase:meta,,1/info:regioninfo/1733252161511/Put/seqid=0 2024-12-03T18:56:31,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741848_1033 (size=5672) 2024-12-03T18:56:31,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741848_1033 (size=5672) 2024-12-03T18:56:31,183 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6701b0c930254fc09e35f85aebbf33e5 2024-12-03T18:56:31,205 DEBUG [M:0;db5a5ccf5be8:45865 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bc750842774346b68ea84ee68dddaa23 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733252162440/Put/seqid=0 2024-12-03T18:56:31,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741849_1034 (size=6118) 2024-12-03T18:56:31,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741849_1034 (size=6118) 2024-12-03T18:56:31,213 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bc750842774346b68ea84ee68dddaa23 2024-12-03T18:56:31,231 DEBUG [M:0;db5a5ccf5be8:45865 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42d3d859a9e742d89a82771fce8dbaca is 69, key is db5a5ccf5be8,45949,1733252160503/rs:state/1733252160965/Put/seqid=0 2024-12-03T18:56:31,235 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741850_1035 (size=5156) 2024-12-03T18:56:31,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741850_1035 (size=5156) 2024-12-03T18:56:31,236 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42d3d859a9e742d89a82771fce8dbaca 2024-12-03T18:56:31,254 DEBUG [M:0;db5a5ccf5be8:45865 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d1c3d5fdc5124db494513ce98dfc3a76 is 52, key is load_balancer_on/state:d/1733252161646/Put/seqid=0 2024-12-03T18:56:31,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741851_1036 (size=5056) 2024-12-03T18:56:31,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741851_1036 (size=5056) 2024-12-03T18:56:31,263 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d1c3d5fdc5124db494513ce98dfc3a76 2024-12-03T18:56:31,268 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6701b0c930254fc09e35f85aebbf33e5 as hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6701b0c930254fc09e35f85aebbf33e5 2024-12-03T18:56:31,274 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6701b0c930254fc09e35f85aebbf33e5, entries=8, sequenceid=56, filesize=5.5 K 2024-12-03T18:56:31,275 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bc750842774346b68ea84ee68dddaa23 as hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bc750842774346b68ea84ee68dddaa23 2024-12-03T18:56:31,282 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bc750842774346b68ea84ee68dddaa23, entries=6, sequenceid=56, filesize=6.0 K 2024-12-03T18:56:31,282 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42d3d859a9e742d89a82771fce8dbaca as hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/42d3d859a9e742d89a82771fce8dbaca 2024-12-03T18:56:31,287 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/42d3d859a9e742d89a82771fce8dbaca, entries=1, sequenceid=56, filesize=5.0 K 2024-12-03T18:56:31,288 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d1c3d5fdc5124db494513ce98dfc3a76 as hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d1c3d5fdc5124db494513ce98dfc3a76 2024-12-03T18:56:31,293 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d1c3d5fdc5124db494513ce98dfc3a76, entries=1, sequenceid=56, filesize=4.9 K 2024-12-03T18:56:31,294 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=56, compaction requested=false 2024-12-03T18:56:31,296 INFO [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:31,296 DEBUG [M:0;db5a5ccf5be8:45865 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252191147Disabling compacts and flushes for region at 1733252191147Disabling writes for close at 1733252191147Obtaining lock to block concurrent updates at 1733252191148 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733252191148Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733252191148Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733252191157 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733252191157Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733252191175 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733252191175Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733252191190 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733252191204 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733252191204Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733252191217 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733252191231 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733252191231Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733252191241 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733252191253 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733252191253Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@367ca9d9: reopening flushed file at 1733252191267 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7dd60f6c: reopening flushed file at 1733252191274 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f36fd00: reopening flushed file at 1733252191282 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d6513a0: reopening flushed file at 1733252191287 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=56, compaction requested=false at 1733252191294 (+7 ms)Writing region close event to WAL at 1733252191296 (+2 ms)Closed at 1733252191296 2024-12-03T18:56:31,296 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,296 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,296 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,297 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,297 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:56:31,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46789 is added to blk_1073741847_1031 (size=757) 2024-12-03T18:56:31,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36037 is added to blk_1073741847_1031 (size=757) 2024-12-03T18:56:31,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:31,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:31,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:31,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,366 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:56:32,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,393 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:32,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:32,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:33,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:33,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:34,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:34,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:35,155 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 after 4001ms 2024-12-03T18:56:35,155 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/WALs/db5a5ccf5be8,45865,1733252160335/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 to hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/oldWALs/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 2024-12-03T18:56:35,158 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/MasterData/oldWALs/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629 to hdfs://localhost:36677/user/jenkins/test-data/fc8b9215-fcf9-a292-1a6c-2c28cacba1d7/oldWALs/db5a5ccf5be8%2C45865%2C1733252160335.1733252160629$masterlocalwal$ 2024-12-03T18:56:35,159 INFO [M:0;db5a5ccf5be8:45865 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T18:56:35,159 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T18:56:35,159 INFO [M:0;db5a5ccf5be8:45865 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45865 2024-12-03T18:56:35,159 INFO [M:0;db5a5ccf5be8:45865 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:56:35,203 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-03T18:56:35,279 INFO [M:0;db5a5ccf5be8:45865 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:56:35,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:56:35,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45865-0x1019c8d16e80000, quorum=127.0.0.1:60968, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:56:35,309 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4fc14d61{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:35,309 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66269315{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:56:35,309 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:56:35,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ffcd24{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:56:35,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10215f32{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,STOPPED} 2024-12-03T18:56:35,311 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:56:35,311 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:56:35,311 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:56:35,311 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677972484-172.17.0.2-1733252158340 (Datanode Uuid 72a14fa2-1e94-48af-8932-1d0a79709813) service to localhost/127.0.0.1:36677 2024-12-03T18:56:35,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data3/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:35,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data4/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:35,312 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:56:35,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1cb32f68{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:35,322 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a15b38f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:56:35,322 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:56:35,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a0cdfff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:56:35,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7790ff99{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,STOPPED} 2024-12-03T18:56:35,324 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:56:35,324 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:56:35,324 WARN [BP-1677972484-172.17.0.2-1733252158340 heartbeating to localhost/127.0.0.1:36677 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1677972484-172.17.0.2-1733252158340 (Datanode Uuid bd6c022d-b2da-4b7e-9001-0af9fe78716f) service to localhost/127.0.0.1:36677 2024-12-03T18:56:35,324 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:56:35,325 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data1/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:35,325 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/cluster_e67e287f-8396-1ac8-75f8-a9eceb442aa7/data/data2/current/BP-1677972484-172.17.0.2-1733252158340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:56:35,325 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:56:35,331 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60c4032c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:56:35,332 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42369482{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:56:35,332 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:56:35,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21865735{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:56:35,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bf7054a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir/,STOPPED} 2024-12-03T18:56:35,339 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T18:56:35,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T18:56:35,373 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36677 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36677 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36677 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36677 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36677 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36677 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:36677 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36677 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=196 (was 124) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6098 (was 6725) 2024-12-03T18:56:35,380 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=196, ProcessCount=11, AvailableMemoryMB=6095 2024-12-03T18:56:35,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T18:56:35,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.log.dir so I do NOT create it in target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f 2024-12-03T18:56:35,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e40a913-c135-17dc-08ad-3f47afab457a/hadoop.tmp.dir so I do NOT create it in target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f 2024-12-03T18:56:35,381 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d, deleteOnExit=true 2024-12-03T18:56:35,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T18:56:35,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/test.cache.data in system properties and HBase conf 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.log.dir in system properties and HBase conf 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T18:56:35,382 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T18:56:35,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/nfs.dump.dir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/java.io.tmpdir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T18:56:35,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T18:56:35,395 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:56:35,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:35,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:35,921 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:56:35,925 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:56:35,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:56:35,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:56:35,926 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:56:35,927 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:56:35,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47d3f616{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:56:35,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aaa4790{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:56:36,041 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c243e85{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/java.io.tmpdir/jetty-localhost-34747-hadoop-hdfs-3_4_1-tests_jar-_-any-13808269041564370568/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:56:36,041 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4cf5e3df{HTTP/1.1, (http/1.1)}{localhost:34747} 2024-12-03T18:56:36,041 INFO [Time-limited test {}] server.Server(415): Started @193918ms 2024-12-03T18:56:36,058 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:56:36,348 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:56:36,350 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:56:36,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:56:36,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:56:36,351 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:56:36,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d36967f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:56:36,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3be31a0b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:56:36,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-12-03T18:56:36,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:36,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3165abde{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/java.io.tmpdir/jetty-localhost-46711-hadoop-hdfs-3_4_1-tests_jar-_-any-14447961528651635967/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:36,453 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4911f4c5{HTTP/1.1, (http/1.1)}{localhost:46711} 2024-12-03T18:56:36,453 INFO [Time-limited test {}] server.Server(415): Started @194330ms 2024-12-03T18:56:36,454 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:56:36,487 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:56:36,491 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:56:36,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:56:36,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:56:36,492 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:56:36,492 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@523d16c3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:56:36,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1beefc80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:56:36,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@34fe4f6c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/java.io.tmpdir/jetty-localhost-39891-hadoop-hdfs-3_4_1-tests_jar-_-any-11999996222810641655/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:56:36,621 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6623a859{HTTP/1.1, (http/1.1)}{localhost:39891} 2024-12-03T18:56:36,621 INFO [Time-limited test {}] server.Server(415): Started @194498ms 2024-12-03T18:56:36,622 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:56:37,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:37,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:37,704 WARN [Thread-1668 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/data/data1/current/BP-1336678484-172.17.0.2-1733252195406/current, will proceed with Du for space computation calculation, 2024-12-03T18:56:37,704 WARN [Thread-1669 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/data/data2/current/BP-1336678484-172.17.0.2-1733252195406/current, will proceed with Du for space computation calculation, 2024-12-03T18:56:37,733 WARN [Thread-1632 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:56:37,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41a7e5c76eb06dd3 with lease ID 0xe28116e64099397a: Processing first storage report for DS-5b09777d-e4e6-46b6-94fa-76d2e1323afd from datanode DatanodeRegistration(127.0.0.1:34083, datanodeUuid=c9729e61-da1d-4936-985e-9c5414b076b8, infoPort=45671, infoSecurePort=0, ipcPort=38781, storageInfo=lv=-57;cid=testClusterID;nsid=258581528;c=1733252195406) 2024-12-03T18:56:37,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41a7e5c76eb06dd3 with lease ID 0xe28116e64099397a: from storage DS-5b09777d-e4e6-46b6-94fa-76d2e1323afd node DatanodeRegistration(127.0.0.1:34083, datanodeUuid=c9729e61-da1d-4936-985e-9c5414b076b8, infoPort=45671, infoSecurePort=0, ipcPort=38781, storageInfo=lv=-57;cid=testClusterID;nsid=258581528;c=1733252195406), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:37,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41a7e5c76eb06dd3 with lease ID 0xe28116e64099397a: Processing first storage report for DS-da6bfd16-721d-4e76-a925-2193b2cf2b21 from datanode DatanodeRegistration(127.0.0.1:34083, datanodeUuid=c9729e61-da1d-4936-985e-9c5414b076b8, infoPort=45671, infoSecurePort=0, ipcPort=38781, storageInfo=lv=-57;cid=testClusterID;nsid=258581528;c=1733252195406) 2024-12-03T18:56:37,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41a7e5c76eb06dd3 with lease ID 0xe28116e64099397a: from storage DS-da6bfd16-721d-4e76-a925-2193b2cf2b21 node DatanodeRegistration(127.0.0.1:34083, datanodeUuid=c9729e61-da1d-4936-985e-9c5414b076b8, infoPort=45671, infoSecurePort=0, ipcPort=38781, storageInfo=lv=-57;cid=testClusterID;nsid=258581528;c=1733252195406), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:37,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T18:56:37,922 WARN [Thread-1680 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/data/data4/current/BP-1336678484-172.17.0.2-1733252195406/current, will proceed with Du for space computation calculation, 2024-12-03T18:56:37,922 
WARN [Thread-1679 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/data/data3/current/BP-1336678484-172.17.0.2-1733252195406/current, will proceed with Du for space computation calculation, 2024-12-03T18:56:37,937 WARN [Thread-1655 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:56:37,939 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x221b11e0db56bc6c with lease ID 0xe28116e64099397b: Processing first storage report for DS-7d72de5e-3855-4eb8-b27d-49fc180e46ee from datanode DatanodeRegistration(127.0.0.1:41215, datanodeUuid=7dab5863-8acc-4081-81a2-c09abff57dac, infoPort=35857, infoSecurePort=0, ipcPort=34987, storageInfo=lv=-57;cid=testClusterID;nsid=258581528;c=1733252195406) 2024-12-03T18:56:37,939 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x221b11e0db56bc6c with lease ID 0xe28116e64099397b: from storage DS-7d72de5e-3855-4eb8-b27d-49fc180e46ee node DatanodeRegistration(127.0.0.1:41215, datanodeUuid=7dab5863-8acc-4081-81a2-c09abff57dac, infoPort=35857, infoSecurePort=0, ipcPort=34987, storageInfo=lv=-57;cid=testClusterID;nsid=258581528;c=1733252195406), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:37,940 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x221b11e0db56bc6c with lease ID 0xe28116e64099397b: Processing first storage report for DS-a75696a8-8967-4fa1-9929-d84685c8efd3 from datanode DatanodeRegistration(127.0.0.1:41215, datanodeUuid=7dab5863-8acc-4081-81a2-c09abff57dac, infoPort=35857, infoSecurePort=0, ipcPort=34987, storageInfo=lv=-57;cid=testClusterID;nsid=258581528;c=1733252195406) 2024-12-03T18:56:37,940 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x221b11e0db56bc6c with lease ID 0xe28116e64099397b: from storage DS-a75696a8-8967-4fa1-9929-d84685c8efd3 node DatanodeRegistration(127.0.0.1:41215, datanodeUuid=7dab5863-8acc-4081-81a2-c09abff57dac, infoPort=35857, infoSecurePort=0, ipcPort=34987, storageInfo=lv=-57;cid=testClusterID;nsid=258581528;c=1733252195406), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:56:37,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f 2024-12-03T18:56:37,961 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/zookeeper_0, clientPort=60004, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/zookeeper_0/version-2, dataLogSize=457 
tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T18:56:37,962 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60004 2024-12-03T18:56:37,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:37,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:37,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:56:37,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:56:37,975 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f with version=8 2024-12-03T18:56:37,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase-staging 2024-12-03T18:56:37,977 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:56:37,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:37,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:37,977 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:56:37,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:37,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:56:37,977 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T18:56:37,977 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:56:37,978 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34057 2024-12-03T18:56:37,979 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34057 connecting to ZooKeeper ensemble=127.0.0.1:60004 2024-12-03T18:56:38,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340570x0, quorum=127.0.0.1:60004, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:56:38,032 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34057-0x1019c8da9ee0000 connected 2024-12-03T18:56:38,116 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:38,117 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:38,119 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:56:38,119 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f, hbase.cluster.distributed=false 2024-12-03T18:56:38,121 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:56:38,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34057 2024-12-03T18:56:38,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34057 2024-12-03T18:56:38,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34057 2024-12-03T18:56:38,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34057 2024-12-03T18:56:38,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34057 2024-12-03T18:56:38,139 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:56:38,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:38,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:38,139 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:56:38,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:56:38,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:56:38,139 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, 
hbase.pb.BootstrapNodeService 2024-12-03T18:56:38,139 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:56:38,140 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39923 2024-12-03T18:56:38,141 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39923 connecting to ZooKeeper ensemble=127.0.0.1:60004 2024-12-03T18:56:38,142 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:38,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:38,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399230x0, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:56:38,158 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:399230x0, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:56:38,158 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39923-0x1019c8da9ee0001 connected 2024-12-03T18:56:38,158 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T18:56:38,159 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T18:56:38,160 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T18:56:38,161 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:56:38,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39923 2024-12-03T18:56:38,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39923 2024-12-03T18:56:38,162 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39923 2024-12-03T18:56:38,162 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39923 2024-12-03T18:56:38,162 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39923 2024-12-03T18:56:38,174 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db5a5ccf5be8:34057 2024-12-03T18:56:38,174 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:38,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:56:38,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:56:38,187 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:38,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T18:56:38,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,200 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T18:56:38,200 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db5a5ccf5be8,34057,1733252197976 from backup master directory 2024-12-03T18:56:38,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:38,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:56:38,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:56:38,210 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T18:56:38,210 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:38,214 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/hbase.id] with ID: 9e7eda1e-9607-43be-8b4d-e26901716943 2024-12-03T18:56:38,214 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/.tmp/hbase.id 2024-12-03T18:56:38,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:56:38,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:56:38,223 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/.tmp/hbase.id]:[hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/hbase.id] 2024-12-03T18:56:38,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:38,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T18:56:38,239 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-03T18:56:38,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:56:38,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:56:38,262 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:56:38,263 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T18:56:38,263 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:56:38,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:56:38,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:56:38,277 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store 2024-12-03T18:56:38,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:56:38,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:56:38,291 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:38,291 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:56:38,291 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:38,291 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:38,291 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:56:38,292 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:56:38,292 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T18:56:38,292 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252198291Disabling compacts and flushes for region at 1733252198291Disabling writes for close at 1733252198291Writing region close event to WAL at 1733252198292 (+1 ms)Closed at 1733252198292 2024-12-03T18:56:38,292 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/.initializing 2024-12-03T18:56:38,293 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/WALs/db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:38,295 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C34057%2C1733252197976, suffix=, logDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/WALs/db5a5ccf5be8,34057,1733252197976, archiveDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/oldWALs, maxLogs=10 2024-12-03T18:56:38,295 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C34057%2C1733252197976.1733252198295 2024-12-03T18:56:38,300 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/WALs/db5a5ccf5be8,34057,1733252197976/db5a5ccf5be8%2C34057%2C1733252197976.1733252198295 2024-12-03T18:56:38,301 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45671:45671),(127.0.0.1/127.0.0.1:35857:35857)] 2024-12-03T18:56:38,302 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:56:38,302 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:38,302 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,302 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,303 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,304 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T18:56:38,304 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,305 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:38,305 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T18:56:38,306 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:56:38,307 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T18:56:38,308 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:56:38,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T18:56:38,310 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:56:38,310 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,311 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,312 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,313 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,313 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,314 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T18:56:38,315 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:56:38,317 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:56:38,318 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787206, jitterRate=9.852051734924316E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T18:56:38,319 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733252198302Initializing all the Stores at 1733252198303 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252198303Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252198303Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252198303Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252198303Cleaning up temporary data from old regions at 1733252198313 (+10 ms)Region opened successfully at 1733252198319 (+6 ms) 2024-12-03T18:56:38,320 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T18:56:38,324 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62778999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:56:38,325 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T18:56:38,325 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T18:56:38,325 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T18:56:38,325 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T18:56:38,326 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T18:56:38,326 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T18:56:38,326 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T18:56:38,328 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T18:56:38,329 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T18:56:38,336 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T18:56:38,337 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T18:56:38,338 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T18:56:38,347 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T18:56:38,347 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T18:56:38,348 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T18:56:38,357 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T18:56:38,358 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T18:56:38,368 DEBUG 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T18:56:38,370 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T18:56:38,378 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T18:56:38,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:56:38,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:56:38,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,389 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db5a5ccf5be8,34057,1733252197976, sessionid=0x1019c8da9ee0000, setting cluster-up flag (Was=false) 2024-12-03T18:56:38,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:38,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:38,442 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T18:56:38,443 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:38,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,494 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T18:56:38,495 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:38,496 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T18:56:38,497 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T18:56:38,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T18:56:38,498 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-03T18:56:38,498 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db5a5ccf5be8,34057,1733252197976 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T18:56:38,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:56:38,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:56:38,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:56:38,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:56:38,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db5a5ccf5be8:0, corePoolSize=10, maxPoolSize=10 2024-12-03T18:56:38,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:56:38,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,501 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:56:38,501 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733252228501 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T18:56:38,502 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,502 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T18:56:38,502 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T18:56:38,503 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T18:56:38,503 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T18:56:38,503 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252198503,5,FailOnTimeoutGroup] 2024-12-03T18:56:38,504 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252198503,5,FailOnTimeoutGroup] 2024-12-03T18:56:38,504 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,504 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T18:56:38,504 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,504 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:56:38,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:56:38,512 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T18:56:38,512 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f 2024-12-03T18:56:38,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:56:38,518 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:56:38,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:38,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:56:38,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:56:38,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:38,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:56:38,522 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:56:38,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:38,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:56:38,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:56:38,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:38,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:56:38,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:56:38,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:38,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:38,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:56:38,527 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740 2024-12-03T18:56:38,527 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740 2024-12-03T18:56:38,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:56:38,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:56:38,529 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T18:56:38,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:56:38,532 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:56:38,532 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881619, jitterRate=0.12103667855262756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:56:38,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733252198519Initializing all the Stores at 1733252198519Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252198519Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252198520 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252198520Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252198520Cleaning up temporary data from old regions at 1733252198528 (+8 ms)Region opened successfully at 1733252198533 (+5 ms) 2024-12-03T18:56:38,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:56:38,533 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:56:38,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:56:38,533 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:56:38,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:56:38,538 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:56:38,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252198533Disabling compacts and flushes for region at 1733252198533Disabling writes for close at 1733252198533Writing region close event to WAL at 1733252198538 (+5 ms)Closed at 1733252198538 2024-12-03T18:56:38,540 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:56:38,540 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T18:56:38,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T18:56:38,541 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:56:38,542 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T18:56:38,564 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(746): ClusterId : 9e7eda1e-9607-43be-8b4d-e26901716943 2024-12-03T18:56:38,564 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T18:56:38,577 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T18:56:38,577 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T18:56:38,590 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T18:56:38,590 DEBUG [RS:0;db5a5ccf5be8:39923 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45582ba1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:56:38,601 DEBUG [RS:0;db5a5ccf5be8:39923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db5a5ccf5be8:39923 2024-12-03T18:56:38,602 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T18:56:38,602 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T18:56:38,602 DEBUG [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T18:56:38,603 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(2659): reportForDuty to master=db5a5ccf5be8,34057,1733252197976 with port=39923, startcode=1733252198139 2024-12-03T18:56:38,603 DEBUG [RS:0;db5a5ccf5be8:39923 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T18:56:38,604 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54287, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T18:56:38,605 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34057 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:38,605 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34057 {}] master.ServerManager(517): Registering regionserver=db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:38,607 DEBUG [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f 2024-12-03T18:56:38,607 DEBUG [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37713 2024-12-03T18:56:38,607 DEBUG [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T18:56:38,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:56:38,618 DEBUG [RS:0;db5a5ccf5be8:39923 {}] zookeeper.ZKUtil(111): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:38,619 WARN [RS:0;db5a5ccf5be8:39923 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:56:38,619 INFO [RS:0;db5a5ccf5be8:39923 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:56:38,619 DEBUG [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:38,619 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db5a5ccf5be8,39923,1733252198139] 2024-12-03T18:56:38,622 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T18:56:38,623 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T18:56:38,623 INFO [RS:0;db5a5ccf5be8:39923 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:56:38,623 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
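The MemStoreFlusher and PressureAwareCompactionThroughputController entries just above report globalMemStoreLimit=880 M with a low-water mark of 836 M (95% of the limit) and compaction throughput bounds of 100 MB/s and 50 MB/s. A minimal sketch of the configuration keys behind those numbers is shown below, as they might be pinned on the configuration handed to a mini-cluster before startup; the ~2.2 GB heap implied by the 0.4 fraction is an assumption, and the concrete values are simply the ones reported in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndThroughputConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Upper memstore limit as a fraction of the regionserver heap; 0.4 of a ~2.2 GB
    // test heap (assumed) matches the 880 M logged above.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the upper limit: 0.95 * 880 M = 836 M.
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // PressureAwareCompactionThroughputController bounds, in bytes per second.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}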
2024-12-03T18:56:38,623 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T18:56:38,624 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T18:56:38,624 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,624 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,624 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,624 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,624 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,624 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,624 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:56:38,625 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,625 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,625 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,625 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,625 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,625 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:56:38,625 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:56:38,625 DEBUG [RS:0;db5a5ccf5be8:39923 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:56:38,625 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T18:56:38,625 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,625 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,625 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,625 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,625 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,39923,1733252198139-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:56:38,638 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T18:56:38,639 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,39923,1733252198139-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,639 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,639 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.Replication(171): db5a5ccf5be8,39923,1733252198139 started 2024-12-03T18:56:38,652 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:38,652 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1482): Serving as db5a5ccf5be8,39923,1733252198139, RpcServer on db5a5ccf5be8/172.17.0.2:39923, sessionid=0x1019c8da9ee0001 2024-12-03T18:56:38,652 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T18:56:38,652 DEBUG [RS:0;db5a5ccf5be8:39923 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:38,652 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,39923,1733252198139' 2024-12-03T18:56:38,652 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T18:56:38,653 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T18:56:38,654 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T18:56:38,654 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T18:56:38,654 DEBUG [RS:0;db5a5ccf5be8:39923 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:38,654 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,39923,1733252198139' 2024-12-03T18:56:38,654 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T18:56:38,654 DEBUG 
[RS:0;db5a5ccf5be8:39923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T18:56:38,654 DEBUG [RS:0;db5a5ccf5be8:39923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T18:56:38,654 INFO [RS:0;db5a5ccf5be8:39923 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T18:56:38,654 INFO [RS:0;db5a5ccf5be8:39923 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T18:56:38,693 WARN [db5a5ccf5be8:34057 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T18:56:38,756 INFO [RS:0;db5a5ccf5be8:39923 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C39923%2C1733252198139, suffix=, logDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139, archiveDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/oldWALs, maxLogs=32 2024-12-03T18:56:38,757 INFO [RS:0;db5a5ccf5be8:39923 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C39923%2C1733252198139.1733252198757 2024-12-03T18:56:38,765 INFO [RS:0;db5a5ccf5be8:39923 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252198757 2024-12-03T18:56:38,772 DEBUG [RS:0;db5a5ccf5be8:39923 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35857:35857),(127.0.0.1/127.0.0.1:45671:45671)] 2024-12-03T18:56:38,943 DEBUG [db5a5ccf5be8:34057 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T18:56:38,943 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:38,945 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,39923,1733252198139, state=OPENING 2024-12-03T18:56:38,989 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T18:56:38,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:38,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:56:39,000 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:56:39,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:56:39,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:56:39,000 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,39923,1733252198139}] 2024-12-03T18:56:39,153 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T18:56:39,155 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37529, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T18:56:39,159 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T18:56:39,159 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:56:39,160 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C39923%2C1733252198139.meta, suffix=.meta, logDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139, archiveDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/oldWALs, maxLogs=32 2024-12-03T18:56:39,161 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C39923%2C1733252198139.meta.1733252199161.meta 2024-12-03T18:56:39,166 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.meta.1733252199161.meta 2024-12-03T18:56:39,166 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35857:35857),(127.0.0.1/127.0.0.1:45671:45671)] 2024-12-03T18:56:39,167 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:56:39,167 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T18:56:39,167 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T18:56:39,167 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T18:56:39,167 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T18:56:39,168 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:39,168 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T18:56:39,168 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T18:56:39,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:56:39,170 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:56:39,170 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:39,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:39,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:56:39,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:56:39,171 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:39,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:39,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:56:39,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:56:39,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:39,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:56:39,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:56:39,175 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:56:39,175 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:39,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
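The repeated CompactionConfiguration lines above (one per column family of hbase:meta) all print the same selection parameters: minCompactSize 128 MB, maxCompactSize 8.00 EB, between 3 and 10 files per minor compaction, ratio 1.2, off-peak ratio 5.0, and a 7-day major compaction period with 0.5 jitter. The sketch below maps those logged values back to their standard configuration keys; it is illustrative only and mirrors the logged defaults rather than any per-table override used by this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Store files at or below this size are always eligible for minor compaction (128 MB).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    // Files above this size are never selected; Long.MAX_VALUE bytes prints as 8.00 EB.
    conf.setLong("hbase.hstore.compaction.max.size", Long.MAX_VALUE);
    // Select between 3 and 10 files per minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Selection ratio 1.2 normally, 5.0 inside the off-peak window.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Major compactions every 604800000 ms (7 days), randomized by +/-50% jitter.
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}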
2024-12-03T18:56:39,175 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:56:39,176 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740 2024-12-03T18:56:39,177 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740 2024-12-03T18:56:39,178 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:56:39,178 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:56:39,178 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T18:56:39,180 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:56:39,180 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772669, jitterRate=-0.017500966787338257}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:56:39,181 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T18:56:39,181 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733252199168Writing region info on filesystem at 1733252199168Initializing all the Stores at 1733252199168Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252199168Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252199169 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252199169Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252199169Cleaning up temporary data from old regions at 1733252199178 (+9 ms)Running coprocessor post-open hooks at 1733252199181 (+3 ms)Region opened successfully at 1733252199181 2024-12-03T18:56:39,182 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733252199153 2024-12-03T18:56:39,185 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T18:56:39,185 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T18:56:39,186 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:39,187 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,39923,1733252198139, state=OPEN 2024-12-03T18:56:39,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:56:39,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:56:39,230 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:39,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:56:39,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:56:39,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T18:56:39,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,39923,1733252198139 in 230 msec 2024-12-03T18:56:39,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T18:56:39,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 694 msec 2024-12-03T18:56:39,237 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:56:39,237 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T18:56:39,239 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:56:39,239 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,39923,1733252198139, seqNum=-1] 2024-12-03T18:56:39,239 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:56:39,241 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52815, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:56:39,248 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 749 msec 2024-12-03T18:56:39,248 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733252199248, completionTime=-1 2024-12-03T18:56:39,248 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T18:56:39,248 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T18:56:39,250 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T18:56:39,250 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733252259250 2024-12-03T18:56:39,250 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733252319250 2024-12-03T18:56:39,250 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T18:56:39,251 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34057,1733252197976-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:39,251 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34057,1733252197976-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:39,251 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34057,1733252197976-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:39,251 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db5a5ccf5be8:34057, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:56:39,251 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:39,251 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T18:56:39,253 DEBUG [master/db5a5ccf5be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T18:56:39,255 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.045sec 2024-12-03T18:56:39,256 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T18:56:39,256 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T18:56:39,256 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T18:56:39,256 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T18:56:39,256 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T18:56:39,256 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34057,1733252197976-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:56:39,256 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34057,1733252197976-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T18:56:39,259 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T18:56:39,259 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T18:56:39,259 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34057,1733252197976-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:56:39,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fe31621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:56:39,264 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db5a5ccf5be8,34057,-1 for getting cluster id 2024-12-03T18:56:39,264 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T18:56:39,266 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9e7eda1e-9607-43be-8b4d-e26901716943' 2024-12-03T18:56:39,266 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T18:56:39,266 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9e7eda1e-9607-43be-8b4d-e26901716943" 2024-12-03T18:56:39,267 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6021364a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:56:39,267 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db5a5ccf5be8,34057,-1] 2024-12-03T18:56:39,267 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T18:56:39,267 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:56:39,268 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T18:56:39,269 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65840522, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:56:39,270 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:56:39,271 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,39923,1733252198139, seqNum=-1] 2024-12-03T18:56:39,271 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:56:39,272 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54358, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:56:39,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:39,275 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:56:39,278 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T18:56:39,278 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T18:56:39,279 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is db5a5ccf5be8,34057,1733252197976 2024-12-03T18:56:39,279 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a113252 2024-12-03T18:56:39,279 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T18:56:39,280 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T18:56:39,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T18:56:39,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-03T18:56:39,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:56:39,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T18:56:39,283 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T18:56:39,283 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:39,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-03T18:56:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:56:39,285 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T18:56:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741835_1011 (size=405) 2024-12-03T18:56:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741835_1011 (size=405) 2024-12-03T18:56:39,294 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 34bc197540be118162007a076a6157bd, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f 2024-12-03T18:56:39,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741836_1012 (size=88) 2024-12-03T18:56:39,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741836_1012 (size=88) 2024-12-03T18:56:39,301 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:39,301 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 34bc197540be118162007a076a6157bd, disabling compactions & flushes 2024-12-03T18:56:39,301 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:56:39,301 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:56:39,301 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. after waiting 0 ms 2024-12-03T18:56:39,301 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
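The HMaster$4 entry above records the client request that created 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family (VERSIONS 1, BLOOMFILTER ROW, 64 KB blocks), and the two TableDescriptorChecker WARNs before it point at the deliberately tiny cluster-level values hbase.hregion.max.filesize=786432 and hbase.hregion.memstore.flush.size=8192. A minimal sketch of issuing the equivalent create through the Admin API is shown below; connection setup is generic and the builder calls are the standard client ones, not necessarily what the test itself invokes.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)        // VERSIONS => '1'
              .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build();
      // Handled by MasterRpcServices and stored as pid=4, CreateTableProcedure, as logged above.
      admin.createTable(desc);
    }
  }
}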
2024-12-03T18:56:39,301 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:56:39,301 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 34bc197540be118162007a076a6157bd: Waiting for close lock at 1733252199301Disabling compacts and flushes for region at 1733252199301Disabling writes for close at 1733252199301Writing region close event to WAL at 1733252199301Closed at 1733252199301 2024-12-03T18:56:39,302 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T18:56:39,302 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733252199302"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733252199302"}]},"ts":"1733252199302"} 2024-12-03T18:56:39,305 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-03T18:56:39,306 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T18:56:39,306 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252199306"}]},"ts":"1733252199306"} 2024-12-03T18:56:39,309 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-03T18:56:39,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=34bc197540be118162007a076a6157bd, ASSIGN}] 2024-12-03T18:56:39,311 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=34bc197540be118162007a076a6157bd, ASSIGN 2024-12-03T18:56:39,312 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=34bc197540be118162007a076a6157bd, ASSIGN; state=OFFLINE, location=db5a5ccf5be8,39923,1733252198139; forceNewPlan=false, retain=false 2024-12-03T18:56:39,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: 
null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:39,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:39,463 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=34bc197540be118162007a076a6157bd, regionState=OPENING, regionLocation=db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:39,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=34bc197540be118162007a076a6157bd, ASSIGN because future has completed 2024-12-03T18:56:39,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 34bc197540be118162007a076a6157bd, server=db5a5ccf5be8,39923,1733252198139}] 2024-12-03T18:56:39,623 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:56:39,623 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 34bc197540be118162007a076a6157bd, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:56:39,624 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,624 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:56:39,624 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,624 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,625 INFO [StoreOpener-34bc197540be118162007a076a6157bd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,626 INFO [StoreOpener-34bc197540be118162007a076a6157bd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 34bc197540be118162007a076a6157bd columnFamilyName info 2024-12-03T18:56:39,626 DEBUG [StoreOpener-34bc197540be118162007a076a6157bd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:56:39,627 INFO [StoreOpener-34bc197540be118162007a076a6157bd-1 {}] regionserver.HStore(327): Store=34bc197540be118162007a076a6157bd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:56:39,627 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,627 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,628 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,628 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,628 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,629 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,632 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:56:39,633 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 34bc197540be118162007a076a6157bd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700852, jitterRate=-0.10882088541984558}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T18:56:39,633 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 34bc197540be118162007a076a6157bd 2024-12-03T18:56:39,633 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 34bc197540be118162007a076a6157bd: Running coprocessor pre-open hook at 1733252199624Writing region info on filesystem at 1733252199624Initializing all the Stores at 1733252199624Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252199625 (+1 ms)Cleaning up temporary data from old regions at 1733252199628 (+3 ms)Running coprocessor post-open hooks at 1733252199633 (+5 ms)Region opened successfully at 1733252199633 2024-12-03T18:56:39,634 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd., pid=6, masterSystemTime=1733252199619 2024-12-03T18:56:39,637 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:56:39,637 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:56:39,638 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=34bc197540be118162007a076a6157bd, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,39923,1733252198139 2024-12-03T18:56:39,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 34bc197540be118162007a076a6157bd, server=db5a5ccf5be8,39923,1733252198139 because future has completed 2024-12-03T18:56:39,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T18:56:39,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 34bc197540be118162007a076a6157bd, server=db5a5ccf5be8,39923,1733252198139 in 176 msec 2024-12-03T18:56:39,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T18:56:39,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=34bc197540be118162007a076a6157bd, ASSIGN in 336 msec 2024-12-03T18:56:39,649 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T18:56:39,649 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252199649"}]},"ts":"1733252199649"} 2024-12-03T18:56:39,652 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-03T18:56:39,654 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T18:56:39,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 373 msec 2024-12-03T18:56:40,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:40,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:41,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:41,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:42,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:42,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:42,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:42,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,398 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:56:43,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,399 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:43,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,417 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,417 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,417 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,417 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,421 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,421 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:56:43,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:44,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:44,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:44,622 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T18:56:44,622 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-03T18:56:45,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:45,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:46,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
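The two WARN records above come from HBase's WAL close path: while waiting for the lease on the old writer's file to be released, RecoverLeaseFSUtils.recoverDFSFileLease probes DistributedFileSystem#isFileClosed through reflection, and in this shutdown phase every probe fails because the DFSClient behind the FileSystem has already been closed, so the InvocationTargetException merely wraps "java.io.IOException: Filesystem closed". A minimal sketch of such a reflective probe follows; it is an illustration only (the class and method names in the sketch are hypothetical, not the actual RecoverLeaseFSUtils code), assuming a Hadoop FileSystem instance is available.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: isFileClosed(Path) only exists on DistributedFileSystem,
// so code written against the generic FileSystem API looks it up reflectively.
public final class IsFileClosedProbe {
  static boolean probeIsFileClosed(FileSystem fs, Path walFile) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, walFile);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // not an HDFS client, or the method is not accessible
    } catch (InvocationTargetException e) {
      // The reflective target threw; with a closed DFSClient the cause is the
      // "java.io.IOException: Filesystem closed" seen above. Callers typically
      // log the failure and retry on a timer until a recovery timeout expires.
      return false;
    }
  }
}

The timestamps show the probe being retried roughly once per second, which is why the same stack trace keeps recurring with only the timestamp changing.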
2024-12-03T18:56:47,875 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-03T18:56:47,876 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-03T18:56:47,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-03T18:56:47,877 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-03T18:56:47,878 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-03T18:56:47,878 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-03T18:56:47,879 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T18:56:47,879 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
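Each "Registering RegionServer,sub=..." pair above is a per-table or per-coprocessor MetricRegistry being bridged into Hadoop metrics2 and published as a JMX MBean. As a rough way to see what ended up registered inside the same JVM, one could query the platform MBean server; the ObjectName pattern below ("Hadoop:service=HBase,name=RegionServer,sub=*") is an assumption about the naming scheme, not something stated in this log.

import java.lang.management.ManagementFactory;
import java.util.Set;

import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

// Illustrative only: list the RegionServer metrics MBeans visible in this JVM.
public final class ListRegionServerMetricsBeans {
  public static void main(String[] args) throws MalformedObjectNameException {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Assumed bean name pattern; verify against what jconsole actually shows.
    ObjectName pattern = new ObjectName("Hadoop:service=HBase,name=RegionServer,sub=*");
    Set<ObjectName> names = server.queryNames(pattern, null);
    names.forEach(name -> System.out.println(name.getKeyProperty("sub")));
  }
}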
2024-12-03T18:56:49,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-03T18:56:49,301 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-03T18:56:49,302 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-12-03T18:56:49,309 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T18:56:49,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.
2024-12-03T18:56:49,316 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd., hostname=db5a5ccf5be8,39923,1733252198139, seqNum=2]
2024-12-03T18:56:49,322 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T18:56:49,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T18:56:49,327 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T18:56:49,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-03T18:56:49,329 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T18:56:49,330 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
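The records above trace a table flush end to end on the master side: the CREATE procedure (pid=4) for TestLogRolling-testCompactionRecordDoesntBlockRolling completes, the client locates the single region, and the flush request is stored as a FlushTableProcedure (pid=7) that moves from FLUSH_TABLE_PREPARE to FLUSH_TABLE_FLUSH_REGIONS and fans out a FlushRegionProcedure subprocedure (pid=8). From client code, whether through the synchronous or the asynchronous admin, the whole sequence is driven by a single flush call; a minimal sketch (table name taken from the log, connection settings assumed) is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustration of the client call that results in a FlushTableProcedure like pid=7.
public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Returns once the master reports the flush done, the same
      // "Checking to see if procedure is done" polling visible above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}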
2024-12-03T18:56:49,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-03T18:56:49,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.
2024-12-03T18:56:49,494 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 34bc197540be118162007a076a6157bd 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T18:56:49,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/f9d7690faeab4e7bba3ef074c9dc9d1c is 1080, key is row0001/info:/1733252209317/Put/seqid=0
2024-12-03T18:56:49,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741837_1013 (size=6033)
2024-12-03T18:56:49,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741837_1013 (size=6033)
2024-12-03T18:56:49,557 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/f9d7690faeab4e7bba3ef074c9dc9d1c
2024-12-03T18:56:49,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/f9d7690faeab4e7bba3ef074c9dc9d1c as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/f9d7690faeab4e7bba3ef074c9dc9d1c
2024-12-03T18:56:49,570 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/f9d7690faeab4e7bba3ef074c9dc9d1c, entries=1, sequenceid=5, filesize=5.9 K
2024-12-03T18:56:49,571 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 34bc197540be118162007a076a6157bd in 77ms, sequenceid=5, compaction requested=false
2024-12-03T18:56:49,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 34bc197540be118162007a076a6157bd:
2024-12-03T18:56:49,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.
2024-12-03T18:56:49,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-12-03T18:56:49,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-12-03T18:56:49,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-12-03T18:56:49,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 245 msec
2024-12-03T18:56:49,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 256 msec
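The flush itself (pid=8) follows the usual two-step commit: the memstore is written to a temporary HFile under the region's .tmp directory, the DataNodes acknowledge the new block, and the file is then "committed" by being moved into the store's info directory before the flush is reported finished (77 ms, sequenceid=5, 5.9 K). The same write-to-temporary-then-rename pattern can be sketched against the plain Hadoop FileSystem API; the paths below are placeholders, not the ones from this log, and this is not the actual HRegionFileSystem code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the tmp-then-rename commit pattern used when flushing a store file.
public final class TmpThenRenameCommit {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration(); // assumes fs.defaultFS points at the target HDFS
    FileSystem fs = FileSystem.get(conf);

    Path tmpFile = new Path("/example/region/.tmp/info/example-hfile"); // placeholder
    Path finalFile = new Path("/example/region/info/example-hfile");    // placeholder

    // 1. Write the data where readers cannot see it yet.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
    }
    // 2. Expose it by renaming into the store directory, mirroring the
    //    "Committing ... .tmp/info/... as ... info/..." record above.
    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("rename failed: " + tmpFile + " -> " + finalFile);
    }
  }
}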
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:59,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T18:56:59,361 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T18:56:59,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T18:56:59,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T18:56:59,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-03T18:56:59,372 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-03T18:56:59,373 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T18:56:59,373 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T18:56:59,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:56:59,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 after 68063ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor191.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:59,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:56:59,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta after 68049ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor191.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T18:56:59,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-03T18:56:59,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:56:59,527 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 34bc197540be118162007a076a6157bd 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T18:56:59,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/a49b425d410d490080c6ed9759303d0f is 1080, key is row0002/info:/1733252219365/Put/seqid=0 2024-12-03T18:56:59,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741838_1014 (size=6033) 2024-12-03T18:56:59,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741838_1014 (size=6033) 2024-12-03T18:56:59,545 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/a49b425d410d490080c6ed9759303d0f 2024-12-03T18:56:59,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/a49b425d410d490080c6ed9759303d0f as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/a49b425d410d490080c6ed9759303d0f 2024-12-03T18:56:59,559 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/a49b425d410d490080c6ed9759303d0f, entries=1, sequenceid=9, filesize=5.9 K 2024-12-03T18:56:59,560 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 34bc197540be118162007a076a6157bd in 33ms, sequenceid=9, compaction requested=false 2024-12-03T18:56:59,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 34bc197540be118162007a076a6157bd: 2024-12-03T18:56:59,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:56:59,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-03T18:56:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-03T18:56:59,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-03T18:56:59,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec 2024-12-03T18:56:59,567 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec 2024-12-03T18:57:00,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:00,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:01,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:01,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:02,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:02,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:03,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:03,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:04,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:04,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:05,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:05,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:06,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:06,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:07,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:07,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:07,952 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T18:57:08,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:08,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:09,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:09,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-03T18:57:09,439 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T18:57:09,442 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C39923%2C1733252198139.1733252229441 2024-12-03T18:57:09,447 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:09,447 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:09,447 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:09,447 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:09,447 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:09,448 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252198757 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252229441 2024-12-03T18:57:09,449 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45671:45671),(127.0.0.1/127.0.0.1:35857:35857)] 2024-12-03T18:57:09,449 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252198757 is not closed yet, will try archiving it next time 2024-12-03T18:57:09,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741833_1009 (size=5546) 2024-12-03T18:57:09,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741833_1009 (size=5546) 2024-12-03T18:57:09,450 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T18:57:09,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T18:57:09,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-03T18:57:09,454 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-03T18:57:09,455 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T18:57:09,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T18:57:09,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:09,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-12-03T18:57:09,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:57:09,609 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 34bc197540be118162007a076a6157bd 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T18:57:09,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/cb32b6d28c1543bfb39cf899268b00bf is 1080, key is row0003/info:/1733252229440/Put/seqid=0 2024-12-03T18:57:09,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741840_1016 (size=6033) 2024-12-03T18:57:09,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741840_1016 (size=6033) 2024-12-03T18:57:09,619 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/cb32b6d28c1543bfb39cf899268b00bf 2024-12-03T18:57:09,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/cb32b6d28c1543bfb39cf899268b00bf as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/cb32b6d28c1543bfb39cf899268b00bf 2024-12-03T18:57:09,632 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/cb32b6d28c1543bfb39cf899268b00bf, entries=1, sequenceid=13, filesize=5.9 K 2024-12-03T18:57:09,633 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 34bc197540be118162007a076a6157bd in 24ms, sequenceid=13, compaction requested=true 2024-12-03T18:57:09,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 34bc197540be118162007a076a6157bd: 2024-12-03T18:57:09,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:57:09,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-03T18:57:09,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-03T18:57:09,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-03T18:57:09,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-12-03T18:57:09,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-12-03T18:57:10,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:10,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:11,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:11,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:12,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:12,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:13,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:13,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:14,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:14,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:15,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:15,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:16,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:16,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:17,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:17,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:18,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:18,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:19,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:19,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:19,526 INFO [master/db5a5ccf5be8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T18:57:19,526 INFO [master/db5a5ccf5be8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-03T18:57:19,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-03T18:57:19,560 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T18:57:19,560 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:57:19,563 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:57:19,563 DEBUG [Time-limited test {}] regionserver.HStore(1541): 34bc197540be118162007a076a6157bd/info is initiating minor compaction (all files) 2024-12-03T18:57:19,564 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:57:19,564 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:19,564 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 34bc197540be118162007a076a6157bd/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:57:19,564 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/f9d7690faeab4e7bba3ef074c9dc9d1c, hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/a49b425d410d490080c6ed9759303d0f, hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/cb32b6d28c1543bfb39cf899268b00bf] into tmpdir=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp, totalSize=17.7 K 2024-12-03T18:57:19,566 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f9d7690faeab4e7bba3ef074c9dc9d1c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733252209317 2024-12-03T18:57:19,567 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a49b425d410d490080c6ed9759303d0f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733252219365 2024-12-03T18:57:19,567 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting cb32b6d28c1543bfb39cf899268b00bf, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733252229440 2024-12-03T18:57:19,582 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 34bc197540be118162007a076a6157bd#info#compaction#46 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:57:19,583 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/989d5cc52e3a443e8f2319b8370709c6 is 1080, key is row0001/info:/1733252209317/Put/seqid=0 2024-12-03T18:57:19,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741841_1017 (size=8296) 2024-12-03T18:57:19,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741841_1017 (size=8296) 2024-12-03T18:57:19,594 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/989d5cc52e3a443e8f2319b8370709c6 as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/989d5cc52e3a443e8f2319b8370709c6 2024-12-03T18:57:19,600 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 34bc197540be118162007a076a6157bd/info of 34bc197540be118162007a076a6157bd into 989d5cc52e3a443e8f2319b8370709c6(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:57:19,600 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 34bc197540be118162007a076a6157bd: 2024-12-03T18:57:19,603 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C39923%2C1733252198139.1733252239602 2024-12-03T18:57:19,609 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:19,609 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:19,609 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:19,609 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:19,609 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:19,609 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252229441 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252239602 2024-12-03T18:57:19,611 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35857:35857),(127.0.0.1/127.0.0.1:45671:45671)] 2024-12-03T18:57:19,611 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252229441 is not closed yet, will try archiving it next time 2024-12-03T18:57:19,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741839_1015 (size=2520) 2024-12-03T18:57:19,611 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741839_1015 (size=2520) 2024-12-03T18:57:19,612 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252198757 to hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/oldWALs/db5a5ccf5be8%2C39923%2C1733252198139.1733252198757 2024-12-03T18:57:19,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T18:57:19,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T18:57:19,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-03T18:57:19,614 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-03T18:57:19,615 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T18:57:19,615 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T18:57:19,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39923 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-03T18:57:19,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:57:19,769 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 34bc197540be118162007a076a6157bd 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T18:57:19,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/b09764db4eea459fa829068bc4df9731 is 1080, key is row0000/info:/1733252239601/Put/seqid=0 2024-12-03T18:57:19,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741843_1019 (size=6033) 2024-12-03T18:57:19,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741843_1019 (size=6033) 2024-12-03T18:57:19,778 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/b09764db4eea459fa829068bc4df9731 2024-12-03T18:57:19,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/b09764db4eea459fa829068bc4df9731 as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/b09764db4eea459fa829068bc4df9731 2024-12-03T18:57:19,789 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/b09764db4eea459fa829068bc4df9731, entries=1, sequenceid=18, filesize=5.9 K 2024-12-03T18:57:19,790 INFO [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 34bc197540be118162007a076a6157bd in 21ms, sequenceid=18, compaction requested=false 2024-12-03T18:57:19,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 34bc197540be118162007a076a6157bd: 2024-12-03T18:57:19,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:57:19,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-03T18:57:19,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-03T18:57:19,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-03T18:57:19,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-12-03T18:57:19,798 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-12-03T18:57:20,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:20,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:21,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:21,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:22,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:22,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:23,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:23,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:24,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:24,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:24,624 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 34bc197540be118162007a076a6157bd, had cached 0 bytes from a total of 14329 2024-12-03T18:57:25,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:25,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:26,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:26,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:27,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:27,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:28,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:28,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:29,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:29,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T18:57:29,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34057 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-03T18:57:29,710 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-03T18:57:29,716 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C39923%2C1733252198139.1733252249715
2024-12-03T18:57:29,773 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:57:29,773 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:57:29,774 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:57:29,774 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:57:29,774 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:57:29,774 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252239602 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252249715
2024-12-03T18:57:29,776 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35857:35857),(127.0.0.1/127.0.0.1:45671:45671)]
2024-12-03T18:57:29,776 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252239602 is not closed yet, will try archiving it next time
2024-12-03T18:57:29,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-03T18:57:29,777 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/WALs/db5a5ccf5be8,39923,1733252198139/db5a5ccf5be8%2C39923%2C1733252198139.1733252229441 to hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/oldWALs/db5a5ccf5be8%2C39923%2C1733252198139.1733252229441
2024-12-03T18:57:29,777 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-03T18:57:29,777 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:57:29,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:57:29,778 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T18:57:29,778 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T18:57:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741842_1018 (size=2026)
2024-12-03T18:57:29,778 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-03T18:57:29,779 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1252840442, stopped=false
2024-12-03T18:57:29,779 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db5a5ccf5be8,34057,1733252197976
2024-12-03T18:57:29,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741842_1018 (size=2026)
2024-12-03T18:57:29,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T18:57:29,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T18:57:29,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T18:57:29,880 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-03T18:57:29,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T18:57:29,881 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-03T18:57:29,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:57:29,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:57:29,882 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:57:29,882 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:57:29,882 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db5a5ccf5be8,39923,1733252198139' ***** 2024-12-03T18:57:29,883 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T18:57:29,883 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T18:57:29,883 INFO [RS:0;db5a5ccf5be8:39923 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T18:57:29,883 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T18:57:29,883 INFO [RS:0;db5a5ccf5be8:39923 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T18:57:29,883 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(3091): Received CLOSE for 34bc197540be118162007a076a6157bd 2024-12-03T18:57:29,884 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(959): stopping server db5a5ccf5be8,39923,1733252198139 2024-12-03T18:57:29,884 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:57:29,884 INFO [RS:0;db5a5ccf5be8:39923 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db5a5ccf5be8:39923. 
2024-12-03T18:57:29,884 DEBUG [RS:0;db5a5ccf5be8:39923 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:57:29,884 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 34bc197540be118162007a076a6157bd, disabling compactions & flushes 2024-12-03T18:57:29,884 DEBUG [RS:0;db5a5ccf5be8:39923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:57:29,884 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:57:29,884 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T18:57:29,884 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:57:29,884 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T18:57:29,884 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. after waiting 0 ms 2024-12-03T18:57:29,884 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T18:57:29,884 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:57:29,884 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T18:57:29,884 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 34bc197540be118162007a076a6157bd 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T18:57:29,884 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T18:57:29,884 DEBUG [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 34bc197540be118162007a076a6157bd=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.} 2024-12-03T18:57:29,885 DEBUG [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 34bc197540be118162007a076a6157bd 2024-12-03T18:57:29,885 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:57:29,885 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:57:29,885 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:57:29,885 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:57:29,885 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:57:29,885 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-03T18:57:29,891 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/7af64b58a0274c4a83f5edd3286e0d48 is 1080, key is row0001/info:/1733252249712/Put/seqid=0 2024-12-03T18:57:29,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741845_1021 (size=6033) 2024-12-03T18:57:29,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741845_1021 (size=6033) 2024-12-03T18:57:29,898 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/7af64b58a0274c4a83f5edd3286e0d48 2024-12-03T18:57:29,902 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/info/3e8cd2a916c54524a93519116d6cc07f is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd./info:regioninfo/1733252199638/Put/seqid=0 2024-12-03T18:57:29,905 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/.tmp/info/7af64b58a0274c4a83f5edd3286e0d48 as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/7af64b58a0274c4a83f5edd3286e0d48 2024-12-03T18:57:29,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741846_1022 (size=7308) 2024-12-03T18:57:29,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741846_1022 (size=7308) 2024-12-03T18:57:29,908 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/info/3e8cd2a916c54524a93519116d6cc07f 2024-12-03T18:57:29,912 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/7af64b58a0274c4a83f5edd3286e0d48, entries=1, sequenceid=22, filesize=5.9 K 2024-12-03T18:57:29,913 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 34bc197540be118162007a076a6157bd in 29ms, sequenceid=22, compaction requested=true 2024-12-03T18:57:29,913 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/f9d7690faeab4e7bba3ef074c9dc9d1c, hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/a49b425d410d490080c6ed9759303d0f, hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/cb32b6d28c1543bfb39cf899268b00bf] to archive 2024-12-03T18:57:29,914 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T18:57:29,916 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/f9d7690faeab4e7bba3ef074c9dc9d1c to hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/f9d7690faeab4e7bba3ef074c9dc9d1c 2024-12-03T18:57:29,917 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/a49b425d410d490080c6ed9759303d0f to hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/a49b425d410d490080c6ed9759303d0f 2024-12-03T18:57:29,918 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/cb32b6d28c1543bfb39cf899268b00bf to hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/info/cb32b6d28c1543bfb39cf899268b00bf 2024-12-03T18:57:29,918 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db5a5ccf5be8:34057 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-03T18:57:29,919 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f9d7690faeab4e7bba3ef074c9dc9d1c=6033, a49b425d410d490080c6ed9759303d0f=6033, cb32b6d28c1543bfb39cf899268b00bf=6033] 2024-12-03T18:57:29,922 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/34bc197540be118162007a076a6157bd/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-03T18:57:29,923 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 2024-12-03T18:57:29,923 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 34bc197540be118162007a076a6157bd: Waiting for close lock at 1733252249884Running coprocessor pre-close hooks at 1733252249884Disabling compacts and flushes for region at 1733252249884Disabling writes for close at 1733252249884Obtaining lock to block concurrent updates at 1733252249884Preparing flush snapshotting stores in 34bc197540be118162007a076a6157bd at 1733252249884Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733252249885 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. at 1733252249886 (+1 ms)Flushing 34bc197540be118162007a076a6157bd/info: creating writer at 1733252249886Flushing 34bc197540be118162007a076a6157bd/info: appending metadata at 1733252249890 (+4 ms)Flushing 34bc197540be118162007a076a6157bd/info: closing flushed file at 1733252249890Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f67a8b0: reopening flushed file at 1733252249904 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 34bc197540be118162007a076a6157bd in 29ms, sequenceid=22, compaction requested=true at 1733252249913 (+9 ms)Writing region close event to WAL at 1733252249919 (+6 ms)Running coprocessor post-close hooks at 1733252249922 (+3 ms)Closed at 1733252249922 2024-12-03T18:57:29,923 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733252199280.34bc197540be118162007a076a6157bd. 
2024-12-03T18:57:29,927 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/ns/49c97b2d7e934b7a910c91efff490b1f is 43, key is default/ns:d/1733252199241/Put/seqid=0 2024-12-03T18:57:29,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741847_1023 (size=5153) 2024-12-03T18:57:29,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741847_1023 (size=5153) 2024-12-03T18:57:29,931 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/ns/49c97b2d7e934b7a910c91efff490b1f 2024-12-03T18:57:29,948 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/table/b861f40771af45d7a3a8269aa2e863c4 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733252199649/Put/seqid=0 2024-12-03T18:57:29,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741848_1024 (size=5508) 2024-12-03T18:57:29,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741848_1024 (size=5508) 2024-12-03T18:57:29,953 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/table/b861f40771af45d7a3a8269aa2e863c4 2024-12-03T18:57:29,958 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/info/3e8cd2a916c54524a93519116d6cc07f as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/info/3e8cd2a916c54524a93519116d6cc07f 2024-12-03T18:57:29,962 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/info/3e8cd2a916c54524a93519116d6cc07f, entries=10, sequenceid=11, filesize=7.1 K 2024-12-03T18:57:29,963 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/ns/49c97b2d7e934b7a910c91efff490b1f as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/ns/49c97b2d7e934b7a910c91efff490b1f 2024-12-03T18:57:29,968 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/ns/49c97b2d7e934b7a910c91efff490b1f, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T18:57:29,968 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/.tmp/table/b861f40771af45d7a3a8269aa2e863c4 as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/table/b861f40771af45d7a3a8269aa2e863c4 2024-12-03T18:57:29,973 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/table/b861f40771af45d7a3a8269aa2e863c4, entries=2, sequenceid=11, filesize=5.4 K 2024-12-03T18:57:29,974 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 89ms, sequenceid=11, compaction requested=false 2024-12-03T18:57:29,978 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T18:57:29,979 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:57:29,979 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:57:29,979 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252249885Running coprocessor pre-close hooks at 1733252249885Disabling compacts and flushes for region at 1733252249885Disabling writes for close at 1733252249885Obtaining lock to block concurrent updates at 1733252249885Preparing flush snapshotting stores in 1588230740 at 1733252249885Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733252249885Flushing stores of hbase:meta,,1.1588230740 at 1733252249886 (+1 ms)Flushing 1588230740/info: creating writer at 1733252249886Flushing 1588230740/info: appending metadata at 1733252249902 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733252249902Flushing 1588230740/ns: creating writer at 1733252249912 (+10 ms)Flushing 1588230740/ns: appending metadata at 1733252249926 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733252249926Flushing 1588230740/table: creating writer at 1733252249936 (+10 ms)Flushing 1588230740/table: appending metadata at 1733252249947 (+11 ms)Flushing 1588230740/table: closing flushed file at 1733252249947Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7555d39: reopening flushed file at 1733252249957 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7dd651e9: reopening flushed file at 1733252249962 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74095a91: reopening flushed file at 1733252249968 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 89ms, sequenceid=11, compaction requested=false at 1733252249974 (+6 ms)Writing region close event to WAL at 1733252249975 (+1 ms)Running coprocessor post-close hooks at 1733252249979 (+4 ms)Closed at 1733252249979 2024-12-03T18:57:29,979 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T18:57:30,085 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(976): stopping server db5a5ccf5be8,39923,1733252198139; all regions closed. 2024-12-03T18:57:30,085 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,085 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,086 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,086 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,086 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741834_1010 (size=3306) 2024-12-03T18:57:30,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741834_1010 (size=3306) 2024-12-03T18:57:30,090 DEBUG [RS:0;db5a5ccf5be8:39923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/oldWALs 2024-12-03T18:57:30,090 INFO [RS:0;db5a5ccf5be8:39923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C39923%2C1733252198139.meta:.meta(num 1733252199161) 2024-12-03T18:57:30,090 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,090 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,091 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,091 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,091 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741844_1020 (size=1252) 2024-12-03T18:57:30,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741844_1020 (size=1252) 2024-12-03T18:57:30,096 DEBUG [RS:0;db5a5ccf5be8:39923 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/oldWALs 2024-12-03T18:57:30,096 INFO [RS:0;db5a5ccf5be8:39923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C39923%2C1733252198139:(num 1733252249715) 2024-12-03T18:57:30,096 DEBUG [RS:0;db5a5ccf5be8:39923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:57:30,096 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:57:30,096 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:57:30,097 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.ChoreService(370): Chore service for: regionserver/db5a5ccf5be8:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T18:57:30,097 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:57:30,097 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T18:57:30,097 INFO [RS:0;db5a5ccf5be8:39923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39923 2024-12-03T18:57:30,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db5a5ccf5be8,39923,1733252198139 2024-12-03T18:57:30,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:57:30,101 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:57:30,111 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db5a5ccf5be8,39923,1733252198139] 2024-12-03T18:57:30,122 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db5a5ccf5be8,39923,1733252198139 already deleted, retry=false 2024-12-03T18:57:30,122 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db5a5ccf5be8,39923,1733252198139 expired; onlineServers=0 2024-12-03T18:57:30,122 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db5a5ccf5be8,34057,1733252197976' ***** 2024-12-03T18:57:30,122 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T18:57:30,122 INFO [M:0;db5a5ccf5be8:34057 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:57:30,122 INFO [M:0;db5a5ccf5be8:34057 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:57:30,122 DEBUG [M:0;db5a5ccf5be8:34057 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T18:57:30,122 DEBUG [M:0;db5a5ccf5be8:34057 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T18:57:30,122 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T18:57:30,122 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252198503 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252198503,5,FailOnTimeoutGroup]
2024-12-03T18:57:30,122 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252198503 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252198503,5,FailOnTimeoutGroup]
2024-12-03T18:57:30,123 INFO [M:0;db5a5ccf5be8:34057 {}] hbase.ChoreService(370): Chore service for: master/db5a5ccf5be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-03T18:57:30,123 INFO [M:0;db5a5ccf5be8:34057 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-03T18:57:30,123 DEBUG [M:0;db5a5ccf5be8:34057 {}] master.HMaster(1795): Stopping service threads
2024-12-03T18:57:30,123 INFO [M:0;db5a5ccf5be8:34057 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-03T18:57:30,123 INFO [M:0;db5a5ccf5be8:34057 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-03T18:57:30,123 INFO [M:0;db5a5ccf5be8:34057 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-03T18:57:30,123 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-03T18:57:30,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-03T18:57:30,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T18:57:30,132 DEBUG [M:0;db5a5ccf5be8:34057 {}] zookeeper.ZKUtil(347): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-03T18:57:30,133 WARN [M:0;db5a5ccf5be8:34057 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-03T18:57:30,133 INFO [M:0;db5a5ccf5be8:34057 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/.lastflushedseqids
2024-12-03T18:57:30,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741849_1025 (size=130)
2024-12-03T18:57:30,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741849_1025 (size=130)
2024-12-03T18:57:30,143 INFO [M:0;db5a5ccf5be8:34057 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-03T18:57:30,143 INFO [M:0;db5a5ccf5be8:34057 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-03T18:57:30,143 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-03T18:57:30,143 INFO [M:0;db5a5ccf5be8:34057 {}]
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:57:30,143 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:57:30,143 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:57:30,143 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:57:30,144 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.53 KB heapSize=54.87 KB 2024-12-03T18:57:30,160 DEBUG [M:0;db5a5ccf5be8:34057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/372313e53bc143339db13e367897a362 is 82, key is hbase:meta,,1/info:regioninfo/1733252199186/Put/seqid=0 2024-12-03T18:57:30,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741850_1026 (size=5672) 2024-12-03T18:57:30,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741850_1026 (size=5672) 2024-12-03T18:57:30,165 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/372313e53bc143339db13e367897a362 2024-12-03T18:57:30,181 DEBUG [M:0;db5a5ccf5be8:34057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/043fa3f8582a4a138fcd6faad479dd16 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733252199656/Put/seqid=0 2024-12-03T18:57:30,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741851_1027 (size=7816) 2024-12-03T18:57:30,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741851_1027 (size=7816) 2024-12-03T18:57:30,186 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.92 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/043fa3f8582a4a138fcd6faad479dd16 2024-12-03T18:57:30,190 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 043fa3f8582a4a138fcd6faad479dd16 2024-12-03T18:57:30,203 DEBUG [M:0;db5a5ccf5be8:34057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b35c53cdb184d479f287b050f219389 is 69, key is db5a5ccf5be8,39923,1733252198139/rs:state/1733252198605/Put/seqid=0 
2024-12-03T18:57:30,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741852_1028 (size=5156) 2024-12-03T18:57:30,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741852_1028 (size=5156) 2024-12-03T18:57:30,208 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b35c53cdb184d479f287b050f219389 2024-12-03T18:57:30,212 INFO [RS:0;db5a5ccf5be8:39923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:57:30,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:57:30,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39923-0x1019c8da9ee0001, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:57:30,212 INFO [RS:0;db5a5ccf5be8:39923 {}] regionserver.HRegionServer(1031): Exiting; stopping=db5a5ccf5be8,39923,1733252198139; zookeeper connection closed. 2024-12-03T18:57:30,212 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4a3249bc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4a3249bc 2024-12-03T18:57:30,212 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T18:57:30,225 DEBUG [M:0;db5a5ccf5be8:34057 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21760c7832704e99a6538d2a198e3d7e is 52, key is load_balancer_on/state:d/1733252199277/Put/seqid=0 2024-12-03T18:57:30,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741853_1029 (size=5056) 2024-12-03T18:57:30,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741853_1029 (size=5056) 2024-12-03T18:57:30,230 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21760c7832704e99a6538d2a198e3d7e 2024-12-03T18:57:30,234 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/372313e53bc143339db13e367897a362 as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/372313e53bc143339db13e367897a362 2024-12-03T18:57:30,239 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/372313e53bc143339db13e367897a362, entries=8, sequenceid=121, filesize=5.5 K 2024-12-03T18:57:30,240 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/043fa3f8582a4a138fcd6faad479dd16 as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/043fa3f8582a4a138fcd6faad479dd16 2024-12-03T18:57:30,244 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 043fa3f8582a4a138fcd6faad479dd16 2024-12-03T18:57:30,244 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/043fa3f8582a4a138fcd6faad479dd16, entries=14, sequenceid=121, filesize=7.6 K 2024-12-03T18:57:30,245 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b35c53cdb184d479f287b050f219389 as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7b35c53cdb184d479f287b050f219389 2024-12-03T18:57:30,250 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7b35c53cdb184d479f287b050f219389, entries=1, sequenceid=121, filesize=5.0 K 2024-12-03T18:57:30,251 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21760c7832704e99a6538d2a198e3d7e as hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21760c7832704e99a6538d2a198e3d7e 2024-12-03T18:57:30,255 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37713/user/jenkins/test-data/3b55791f-cea7-8ac8-db4f-7cb0af809f0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21760c7832704e99a6538d2a198e3d7e, entries=1, sequenceid=121, filesize=4.9 K 2024-12-03T18:57:30,256 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.53 KB/44572, heapSize ~54.80 KB/56120, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=121, compaction requested=false 2024-12-03T18:57:30,258 INFO [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
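The "Committing .tmp/... as .../info/..." lines above show the flush commit step: each flushed HFile is first written under the region's .tmp directory and only then moved into the live store directory, so readers never observe a half-written file. A minimal sketch of that write-then-rename idiom using the plain Hadoop FileSystem API follows; the paths and file names are illustrative placeholders, not the ones HRegionFileSystem actually computes.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);                    // fs.defaultFS from the environment

    Path storeDir = new Path("/example/store/info");          // hypothetical store directory
    Path tmpFile  = new Path("/example/store/.tmp/abc123");   // hypothetical flush output

    // 1. Write the new file somewhere readers never look.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("flushed cells would go here");
    }

    // 2. Move it into the live store directory (rename is a metadata-only operation on HDFS).
    fs.mkdirs(storeDir);
    Path committed = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, committed)) {
      throw new java.io.IOException("commit failed for " + tmpFile);
    }
    System.out.println("committed " + committed);
  }
}
```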
2024-12-03T18:57:30,258 DEBUG [M:0;db5a5ccf5be8:34057 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252250143Disabling compacts and flushes for region at 1733252250143Disabling writes for close at 1733252250143Obtaining lock to block concurrent updates at 1733252250144 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733252250144Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44572, getHeapSize=56120, getOffHeapSize=0, getCellsCount=140 at 1733252250144Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733252250145 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733252250145Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733252250160 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733252250160Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733252250169 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733252250181 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733252250181Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733252250190 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733252250202 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733252250202Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733252250213 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733252250224 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733252250224Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68bde033: reopening flushed file at 1733252250234 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43d5be51: reopening flushed file at 1733252250239 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fffb0a7: reopening flushed file at 1733252250244 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@796bcd7e: reopening flushed file at 1733252250250 (+6 ms)Finished flush of dataSize ~43.53 KB/44572, heapSize ~54.80 KB/56120, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=121, compaction requested=false at 1733252250256 (+6 ms)Writing region close event to WAL at 1733252250258 (+2 ms)Closed at 1733252250258 2024-12-03T18:57:30,259 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,259 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,259 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,259 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,259 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:57:30,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741830_1006 (size=52969) 2024-12-03T18:57:30,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34083 is added to blk_1073741830_1006 (size=52969) 2024-12-03T18:57:30,262 INFO [M:0;db5a5ccf5be8:34057 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
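The "Region close journal" entry above packs every close step into one line, each step stamped with an epoch-millisecond timestamp and, where time elapsed, a "(+N ms)" marker. When chasing a slow close it can help to pull those steps apart; below is a rough, ad-hoc parser sketch (the regex and class are my own illustration, not part of HBase), fed with a shortened sample journal in the same shape.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalParser {
  // Matches "<step description> at <13-digit epoch millis>", optionally followed by "(+N ms)".
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal =
        "Waiting for close lock at 1733252250143"
      + "Disabling compacts and flushes for region at 1733252250143"
      + "Obtaining lock to block concurrent updates at 1733252250144 (+1 ms)"
      + "Writing region close event to WAL at 1733252250258 (+2 ms)"
      + "Closed at 1733252250258";

    Matcher m = STEP.matcher(journal);
    long previous = -1;
    while (m.find()) {
      long ts = Long.parseLong(m.group(2));
      long delta = previous < 0 ? 0 : ts - previous;   // time spent since the previous step
      System.out.printf("%-50s ts=%d delta=%dms%n", m.group(1).trim(), ts, delta);
      previous = ts;
    }
  }
}
```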
2024-12-03T18:57:30,262 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T18:57:30,262 INFO [M:0;db5a5ccf5be8:34057 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34057 2024-12-03T18:57:30,263 INFO [M:0;db5a5ccf5be8:34057 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:57:30,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:57:30,370 INFO [M:0;db5a5ccf5be8:34057 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:57:30,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34057-0x1019c8da9ee0000, quorum=127.0.0.1:60004, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:57:30,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34fe4f6c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:57:30,373 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6623a859{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:57:30,373 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:57:30,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1beefc80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:57:30,373 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@523d16c3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.log.dir/,STOPPED} 2024-12-03T18:57:30,376 WARN [BP-1336678484-172.17.0.2-1733252195406 heartbeating to localhost/127.0.0.1:37713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:57:30,376 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
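The ZKWatcher lines just above ("type=None, state=Closed, path=null") are the watcher callbacks ZooKeeper delivers while the master's client session is torn down. A small self-contained sketch of a watcher that logs events in the same shape is shown below; the quorum address and timeout are placeholders, and it assumes a ZooKeeper client version (3.5+) that reports the Closed state, as this log does.

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZooKeeper;

public class LoggingWatcher implements Watcher {
  private final CountDownLatch closed = new CountDownLatch(1);

  @Override
  public void process(WatchedEvent event) {
    // Mirrors what ZKWatcher prints: event type, connection state and znode path.
    System.out.println("Received ZooKeeper Event, type=" + event.getType()
        + ", state=" + event.getState() + ", path=" + event.getPath());
    if (event.getState() == KeeperState.Closed) {
      closed.countDown();
    }
  }

  public static void main(String[] args) throws Exception {
    LoggingWatcher watcher = new LoggingWatcher();
    // Placeholder quorum; a real test would point at its MiniZooKeeperCluster address.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
    zk.close();                                   // triggers the final state=Closed callback
    watcher.closed.await(5, TimeUnit.SECONDS);    // don't hang if no callback arrives
  }
}
```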
2024-12-03T18:57:30,376 WARN [BP-1336678484-172.17.0.2-1733252195406 heartbeating to localhost/127.0.0.1:37713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1336678484-172.17.0.2-1733252195406 (Datanode Uuid 7dab5863-8acc-4081-81a2-c09abff57dac) service to localhost/127.0.0.1:37713 2024-12-03T18:57:30,376 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:57:30,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/data/data3/current/BP-1336678484-172.17.0.2-1733252195406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:57:30,377 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/data/data4/current/BP-1336678484-172.17.0.2-1733252195406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:57:30,377 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:57:30,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3165abde{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:57:30,380 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4911f4c5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:57:30,380 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:57:30,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3be31a0b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:57:30,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d36967f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.log.dir/,STOPPED} 2024-12-03T18:57:30,382 WARN [BP-1336678484-172.17.0.2-1733252195406 heartbeating to localhost/127.0.0.1:37713 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:57:30,382 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
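The datanode, Jetty and ZooKeeper teardown running through these lines (ending with "Minicluster is down" a few entries below) and the immediate restart for the next test are both driven by HBaseTestingUtil. A bare-bones sketch of that lifecycle as a JUnit 4 test might look like the following; it assumes the hbase-testing-util dependency on the classpath and that startMiniCluster/shutdownMiniCluster/getConfiguration are available on HBaseTestingUtil, as the log messages above suggest.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Spins up MiniDFS, MiniZooKeeperCluster and a single master/regionserver,
    // producing startup output like the "Starting up minicluster" lines below.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Produces a shutdown sequence like the one above, ending with "Minicluster is down".
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void metaTableIsServed() throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
         Admin admin = conn.getAdmin()) {
      // hbase:meta exists on any healthy cluster, so this confirms the mini cluster is up.
      Assert.assertTrue(admin.tableExists(TableName.META_TABLE_NAME));
    }
  }
}
```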
2024-12-03T18:57:30,382 WARN [BP-1336678484-172.17.0.2-1733252195406 heartbeating to localhost/127.0.0.1:37713 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1336678484-172.17.0.2-1733252195406 (Datanode Uuid c9729e61-da1d-4936-985e-9c5414b076b8) service to localhost/127.0.0.1:37713 2024-12-03T18:57:30,382 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:57:30,383 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/data/data1/current/BP-1336678484-172.17.0.2-1733252195406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:57:30,383 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/cluster_e0760362-1a4b-c678-a93c-a55a7d70ae0d/data/data2/current/BP-1336678484-172.17.0.2-1733252195406 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:57:30,383 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:57:30,390 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c243e85{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:57:30,390 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4cf5e3df{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:57:30,391 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:57:30,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aaa4790{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:57:30,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47d3f616{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.log.dir/,STOPPED} 2024-12-03T18:57:30,397 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T18:57:30,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T18:57:30,427 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 181) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37713 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37713 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37713 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:37713 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/db5a5ccf5be8:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37713 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37713 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=486 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=142 (was 196), ProcessCount=11 (was 11), AvailableMemoryMB=6226 (was 6095) - AvailableMemoryMB LEAK? 
- 2024-12-03T18:57:30,435 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=142, ProcessCount=11, AvailableMemoryMB=6226 2024-12-03T18:57:30,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T18:57:30,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.log.dir so I do NOT create it in target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba 2024-12-03T18:57:30,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ddad0867-e686-affa-191f-3afcc7d4395f/hadoop.tmp.dir so I do NOT create it in target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba 2024-12-03T18:57:30,435 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248, deleteOnExit=true 2024-12-03T18:57:30,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T18:57:30,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/test.cache.data in system properties and HBase conf 2024-12-03T18:57:30,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.log.dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T18:57:30,436 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T18:57:30,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/nfs.dump.dir in system properties and HBase conf 2024-12-03T18:57:30,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/java.io.tmpdir in system properties and HBase conf 2024-12-03T18:57:30,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:57:30,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T18:57:30,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T18:57:30,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:30,448 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:57:30,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:30,629 INFO [regionserver/db5a5ccf5be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:57:30,802 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:57:30,805 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:57:30,806 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:57:30,806 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:57:30,806 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:57:30,807 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:57:30,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45bda0cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:57:30,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4881a2ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:57:30,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43909889{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/java.io.tmpdir/jetty-localhost-33887-hadoop-hdfs-3_4_1-tests_jar-_-any-16945766496817254648/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:57:30,906 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ac7d52f{HTTP/1.1, (http/1.1)}{localhost:33887} 2024-12-03T18:57:30,906 INFO [Time-limited test {}] server.Server(415): Started @248783ms 2024-12-03T18:57:30,918 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:57:31,198 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:57:31,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:57:31,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:57:31,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:57:31,201 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:57:31,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41b7d19a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:57:31,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5551c062{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:57:31,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1204fb24{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/java.io.tmpdir/jetty-localhost-40669-hadoop-hdfs-3_4_1-tests_jar-_-any-9203850472066642012/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:57:31,304 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23e1642c{HTTP/1.1, (http/1.1)}{localhost:40669} 2024-12-03T18:57:31,304 INFO [Time-limited test {}] server.Server(415): Started @249181ms 2024-12-03T18:57:31,305 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:57:31,328 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:57:31,330 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:57:31,331 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:57:31,331 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:57:31,331 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T18:57:31,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4437c7ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:57:31,335 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a5db76d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:57:31,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ce0a24{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/java.io.tmpdir/jetty-localhost-37851-hadoop-hdfs-3_4_1-tests_jar-_-any-16303137136267989661/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:57:31,440 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47bcda8c{HTTP/1.1, (http/1.1)}{localhost:37851} 2024-12-03T18:57:31,440 INFO [Time-limited test {}] server.Server(415): Started @249316ms 2024-12-03T18:57:31,441 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:57:31,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:31,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:32,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:32,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:32,700 WARN [Thread-1985 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/data/data1/current/BP-1603513973-172.17.0.2-1733252250452/current, will proceed with Du for space computation calculation, 2024-12-03T18:57:32,701 WARN [Thread-1986 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/data/data2/current/BP-1603513973-172.17.0.2-1733252250452/current, will proceed with Du for space computation calculation, 2024-12-03T18:57:32,714 WARN [Thread-1949 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:57:32,716 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf2fc7427148d96d9 with lease ID 0xb46d313ba95a4f22: Processing first storage report for DS-4eefd983-e02a-4ae3-a684-27e29c58eeba from datanode DatanodeRegistration(127.0.0.1:44849, datanodeUuid=acc0d782-1206-49a3-a9c7-501e90658194, infoPort=40925, infoSecurePort=0, ipcPort=36735, storageInfo=lv=-57;cid=testClusterID;nsid=452266573;c=1733252250452) 2024-12-03T18:57:32,716 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2fc7427148d96d9 with lease ID 0xb46d313ba95a4f22: from storage DS-4eefd983-e02a-4ae3-a684-27e29c58eeba node DatanodeRegistration(127.0.0.1:44849, datanodeUuid=acc0d782-1206-49a3-a9c7-501e90658194, infoPort=40925, infoSecurePort=0, ipcPort=36735, storageInfo=lv=-57;cid=testClusterID;nsid=452266573;c=1733252250452), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:57:32,716 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf2fc7427148d96d9 with lease ID 0xb46d313ba95a4f22: Processing first storage report for DS-c59dccbf-681f-4676-bd22-ed9334260879 from datanode DatanodeRegistration(127.0.0.1:44849, datanodeUuid=acc0d782-1206-49a3-a9c7-501e90658194, infoPort=40925, infoSecurePort=0, ipcPort=36735, storageInfo=lv=-57;cid=testClusterID;nsid=452266573;c=1733252250452) 2024-12-03T18:57:32,716 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2fc7427148d96d9 with lease ID 0xb46d313ba95a4f22: from storage DS-c59dccbf-681f-4676-bd22-ed9334260879 node DatanodeRegistration(127.0.0.1:44849, datanodeUuid=acc0d782-1206-49a3-a9c7-501e90658194, infoPort=40925, infoSecurePort=0, ipcPort=36735, storageInfo=lv=-57;cid=testClusterID;nsid=452266573;c=1733252250452), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:57:32,926 WARN [Thread-1996 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/data/data3/current/BP-1603513973-172.17.0.2-1733252250452/current, will proceed with Du for space computation calculation, 2024-12-03T18:57:32,926 WARN [Thread-1997 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/data/data4/current/BP-1603513973-172.17.0.2-1733252250452/current, will proceed with Du for space computation calculation, 2024-12-03T18:57:32,944 WARN [Thread-1972 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T18:57:32,946 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x86de5f9e8100fa1c with lease ID 0xb46d313ba95a4f23: Processing first storage report for DS-624d3c45-3475-45e1-852e-de7a97c3ab94 from datanode DatanodeRegistration(127.0.0.1:33431, datanodeUuid=0d2a7dac-d708-4f9e-b6e1-0fcb96f23176, infoPort=43093, infoSecurePort=0, ipcPort=33829, storageInfo=lv=-57;cid=testClusterID;nsid=452266573;c=1733252250452) 2024-12-03T18:57:32,946 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x86de5f9e8100fa1c with lease ID 0xb46d313ba95a4f23: from storage DS-624d3c45-3475-45e1-852e-de7a97c3ab94 node DatanodeRegistration(127.0.0.1:33431, datanodeUuid=0d2a7dac-d708-4f9e-b6e1-0fcb96f23176, infoPort=43093, infoSecurePort=0, ipcPort=33829, storageInfo=lv=-57;cid=testClusterID;nsid=452266573;c=1733252250452), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:57:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x86de5f9e8100fa1c with lease ID 0xb46d313ba95a4f23: Processing first storage report for DS-8d55ed94-1e49-4e3f-9856-59e8cbf9b2ea from datanode DatanodeRegistration(127.0.0.1:33431, datanodeUuid=0d2a7dac-d708-4f9e-b6e1-0fcb96f23176, infoPort=43093, infoSecurePort=0, ipcPort=33829, storageInfo=lv=-57;cid=testClusterID;nsid=452266573;c=1733252250452) 2024-12-03T18:57:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x86de5f9e8100fa1c with lease ID 0xb46d313ba95a4f23: from storage DS-8d55ed94-1e49-4e3f-9856-59e8cbf9b2ea node DatanodeRegistration(127.0.0.1:33431, datanodeUuid=0d2a7dac-d708-4f9e-b6e1-0fcb96f23176, infoPort=43093, infoSecurePort=0, ipcPort=33829, storageInfo=lv=-57;cid=testClusterID;nsid=452266573;c=1733252250452), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:57:32,974 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba 2024-12-03T18:57:32,980 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/zookeeper_0, clientPort=56149, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T18:57:32,981 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56149 2024-12-03T18:57:32,982 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:57:32,984 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:57:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:57:32,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:57:32,994 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3 with version=8 2024-12-03T18:57:32,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase-staging 2024-12-03T18:57:32,995 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:57:32,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:57:32,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:57:32,996 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:57:32,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:57:32,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:57:32,996 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T18:57:32,996 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:57:32,996 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34991 2024-12-03T18:57:32,997 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34991 connecting to ZooKeeper ensemble=127.0.0.1:56149 2024-12-03T18:57:33,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:349910x0, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:57:33,052 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34991-0x1019c8e80dc0000 connected 2024-12-03T18:57:33,137 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:57:33,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:57:33,143 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:57:33,144 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3, hbase.cluster.distributed=false 2024-12-03T18:57:33,147 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:57:33,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34991 2024-12-03T18:57:33,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34991 2024-12-03T18:57:33,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34991 2024-12-03T18:57:33,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34991 2024-12-03T18:57:33,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34991 2024-12-03T18:57:33,165 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:57:33,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:57:33,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:57:33,165 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:57:33,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:57:33,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:57:33,165 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T18:57:33,165 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:57:33,166 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38091 2024-12-03T18:57:33,167 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38091 connecting to ZooKeeper ensemble=127.0.0.1:56149 2024-12-03T18:57:33,167 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:57:33,168 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:57:33,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380910x0, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:57:33,178 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:57:33,178 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38091-0x1019c8e80dc0001 connected 2024-12-03T18:57:33,179 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T18:57:33,179 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T18:57:33,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T18:57:33,181 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:57:33,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38091 2024-12-03T18:57:33,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38091 2024-12-03T18:57:33,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38091 2024-12-03T18:57:33,183 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38091 2024-12-03T18:57:33,183 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38091 2024-12-03T18:57:33,194 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db5a5ccf5be8:34991 2024-12-03T18:57:33,195 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:33,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:57:33,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:57:33,200 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:33,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T18:57:33,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,210 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T18:57:33,210 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db5a5ccf5be8,34991,1733252252995 from backup master directory 2024-12-03T18:57:33,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:57:33,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:33,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:57:33,220 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
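The DEBUG lines above show ZKWatcher reacting to ZooKeeper events (NodeCreated, NodeDeleted, NodeChildrenChanged) and ZKUtil setting watchers on znodes such as /hbase/running and /hbase/master before they exist. As a minimal stand-alone sketch of that watch-then-react pattern, assuming only the stock org.apache.zookeeper client (this is not HBase's ZKWatcher/ZKUtil code; the connect string is a placeholder, while /hbase/running is the znode the log itself watches):

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ExistsWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch created = new CountDownLatch(1);
        // Placeholder connect string; the test ensemble in the log runs on a random local port.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000,
            event -> { /* session-level watcher: connection state changes arrive here */ });

        // One-shot existence watch on a znode that may not exist yet
        // ("Set watcher on znode that does not yet exist" in the log).
        Watcher watch = (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                created.countDown();   // react to /hbase/running appearing
            }
        };
        if (zk.exists("/hbase/running", watch) == null) {
            created.await();           // block until NodeCreated fires
        }
        zk.close();
    }
}
```

An existence watch set via exists() fires only once; long-running code re-registers the watch after each event, which is why the log keeps emitting fresh "Set watcher on znode" lines.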
2024-12-03T18:57:33,220 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:33,224 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/hbase.id] with ID: ca1dad72-2a88-4428-a680-fcc535c8a12c 2024-12-03T18:57:33,224 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/.tmp/hbase.id 2024-12-03T18:57:33,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:57:33,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:57:33,229 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/.tmp/hbase.id]:[hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/hbase.id] 2024-12-03T18:57:33,242 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:57:33,242 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T18:57:33,244 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
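The two FSUtils lines above describe writing the cluster ID to a temporary file and then moving it to its final location, so a reader never observes a half-written hbase.id. A small sketch of that create-then-rename idiom using the public org.apache.hadoop.fs.FileSystem API (the paths and helper name are placeholders, not HBase's FSUtils):

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
    /** Write content to a temp file, then rename it into place so readers never see a partial file. */
    static void writeAtomically(FileSystem fs, Path finalPath, String content) throws java.io.IOException {
        Path tmp = new Path(finalPath.getParent(), ".tmp-" + finalPath.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        // On HDFS, rename is a metadata-only operation, so the file appears all at once.
        if (!fs.rename(tmp, finalPath)) {
            fs.delete(tmp, false);
            throw new java.io.IOException("Could not move " + tmp + " to " + finalPath);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();          // picks up core-site.xml / hdfs-site.xml
        try (FileSystem fs = FileSystem.get(conf)) {
            // Placeholder path; the log writes .../hbase.id under the test rootdir.
            writeAtomically(fs, new Path("/tmp/example/hbase.id"), "example-cluster-id");
        }
    }
}
```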
2024-12-03T18:57:33,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:57:33,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:57:33,259 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:57:33,260 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T18:57:33,261 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:57:33,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:57:33,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:57:33,268 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store 2024-12-03T18:57:33,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:57:33,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:57:33,273 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:57:33,273 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:57:33,273 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:57:33,273 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:57:33,273 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:57:33,273 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:57:33,273 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
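The MasterRegion lines above list the 'master:store' table descriptor with its info/proc/rs/state families and per-family settings (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, and so on). For illustration only, a sketch of how a descriptor with families shaped like 'info' and 'proc' could be assembled with the public HBase client API; the table name below is a placeholder, and this is not the internal code MasterRegion uses:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
    public static void main(String[] args) {
        // 'info'-like family: 3 versions, ROWCOL bloom, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // 'proc'-like family: single version, ROW bloom, default 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build();

        // Placeholder name; the region in the log belongs to the internal 'master:store' table.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();

        System.out.println(td);
    }
}
```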
2024-12-03T18:57:33,273 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252253273Disabling compacts and flushes for region at 1733252253273Disabling writes for close at 1733252253273Writing region close event to WAL at 1733252253273Closed at 1733252253273 2024-12-03T18:57:33,273 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/.initializing 2024-12-03T18:57:33,273 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/WALs/db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:33,275 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C34991%2C1733252252995, suffix=, logDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/WALs/db5a5ccf5be8,34991,1733252252995, archiveDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/oldWALs, maxLogs=10 2024-12-03T18:57:33,276 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C34991%2C1733252252995.1733252253276 2024-12-03T18:57:33,281 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/WALs/db5a5ccf5be8,34991,1733252252995/db5a5ccf5be8%2C34991%2C1733252252995.1733252253276 2024-12-03T18:57:33,287 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40925:40925),(127.0.0.1/127.0.0.1:43093:43093)] 2024-12-03T18:57:33,288 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:57:33,288 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:57:33,288 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,288 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T18:57:33,291 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:57:33,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T18:57:33,293 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:57:33,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,294 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T18:57:33,294 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,295 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:57:33,295 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,296 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T18:57:33,296 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,296 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:57:33,296 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,297 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,297 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,298 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,298 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,299 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T18:57:33,300 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:57:33,302 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:57:33,302 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804172, jitterRate=0.022558137774467468}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T18:57:33,303 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733252253288Initializing all the Stores at 1733252253289 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252253289Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252253290 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252253290Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252253290Cleaning up temporary data from old regions at 1733252253298 (+8 ms)Region opened successfully at 1733252253303 (+5 ms) 2024-12-03T18:57:33,303 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T18:57:33,306 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@621eb7bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:57:33,307 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T18:57:33,307 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T18:57:33,307 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T18:57:33,307 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T18:57:33,308 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T18:57:33,308 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T18:57:33,308 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T18:57:33,310 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T18:57:33,311 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T18:57:33,323 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T18:57:33,324 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T18:57:33,325 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T18:57:33,336 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T18:57:33,336 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T18:57:33,338 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T18:57:33,347 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T18:57:33,348 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T18:57:33,357 DEBUG 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T18:57:33,363 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T18:57:33,368 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T18:57:33,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:57:33,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:57:33,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,379 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db5a5ccf5be8,34991,1733252252995, sessionid=0x1019c8e80dc0000, setting cluster-up flag (Was=false) 2024-12-03T18:57:33,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,431 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T18:57:33,433 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:33,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:33,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:33,483 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T18:57:33,484 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:33,485 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T18:57:33,487 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T18:57:33,488 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T18:57:33,488 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
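Not part of the captured log: a minimal Java sketch of where the balancer numbers reported above (slop=0.2, maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000) usually come from. The stochastic-balancer key names below are assumptions taken from the HBase reference guide rather than from this log, so verify them against the release under test; only the values mirror the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
      public static void main(String[] args) {
        // Loads hbase-default.xml and hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();

        // "slop=0.2" in the BaseLoadBalancer(416) line corresponds to this key.
        conf.setFloat("hbase.regions.slop", 0.2f);

        // Assumed keys for the StochasticLoadBalancer(272) values logged above.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);

        System.out.println("slop=" + conf.getFloat("hbase.regions.slop", 0.001f));
      }
    }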
2024-12-03T18:57:33,488 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db5a5ccf5be8,34991,1733252252995 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T18:57:33,489 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:57:33,489 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:57:33,489 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:57:33,489 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:57:33,489 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db5a5ccf5be8:0, corePoolSize=10, maxPoolSize=10 2024-12-03T18:57:33,489 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,490 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:57:33,490 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,493 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:57:33,493 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T18:57:33,494 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,494 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T18:57:33,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733252283497 2024-12-03T18:57:33,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T18:57:33,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T18:57:33,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T18:57:33,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T18:57:33,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T18:57:33,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T18:57:33,497 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
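The FSTableDescriptors(156) entry above prints the full hbase:meta descriptor. The same per-family settings it shows for 'info' (VERSIONS 3, IN_MEMORY, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks) can be expressed with the public client API; the sketch below builds them on a hypothetical user table named demo_meta_like rather than on hbase:meta itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_meta_like"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .build())
            .build();
        System.out.println(td);
      }
    }

Passing the resulting TableDescriptor to Admin.createTable would create such a table; that step is omitted here since the sketch only builds the descriptor.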
2024-12-03T18:57:33,498 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T18:57:33,498 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T18:57:33,498 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T18:57:33,498 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T18:57:33,498 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T18:57:33,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252253498,5,FailOnTimeoutGroup] 2024-12-03T18:57:33,499 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252253499,5,FailOnTimeoutGroup] 2024-12-03T18:57:33,499 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,499 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T18:57:33,499 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,499 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
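The HMaster(1741) line above names the switch for reopening regions with a very high store file reference count. A small sketch reading only that key; the -1 fallback below is this example's placeholder, not a claim about HBase's shipped default.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The master log above reports the feature disabled because no threshold > 0 is provided.
        int threshold = conf.getInt("hbase.regions.recovery.store.file.ref.count", -1);
        System.out.println("region-reopen threshold: " + threshold
            + (threshold > 0 ? " (chore enabled)" : " (chore disabled)"));
      }
    }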
2024-12-03T18:57:33,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:57:33,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:57:33,502 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T18:57:33,502 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3 2024-12-03T18:57:33,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:57:33,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:57:33,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:57:33,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:57:33,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:57:33,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:57:33,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:57:33,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:57:33,512 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:57:33,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:57:33,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:57:33,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:57:33,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:57:33,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:57:33,515 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:33,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:57:33,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:57:33,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740 2024-12-03T18:57:33,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740 2024-12-03T18:57:33,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:57:33,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:57:33,517 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
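The CompactionConfiguration(183) lines above repeat the same per-store settings for every column family of 1588230740. Below is a hedged sketch of the commonly documented keys behind those numbers (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, major period 604800000, jitter 0.5); the key names are assumptions to check against the HBase release in use, only the values are taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio 1.200000
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L); // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter 0.500000
        // FlushLargeStoresPolicy(65) above falls back to 16 MB per family because
        // hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta.
        System.out.println("min files to compact: " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }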
2024-12-03T18:57:33,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:57:33,520 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:57:33,520 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689048, jitterRate=-0.12383091449737549}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:57:33,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733252253508Initializing all the Stores at 1733252253509 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252253509Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252253509Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252253509Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252253509Cleaning up temporary data from old regions at 1733252253517 (+8 ms)Region opened successfully at 1733252253520 (+3 ms) 2024-12-03T18:57:33,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:57:33,521 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:57:33,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:57:33,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:57:33,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:57:33,521 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:57:33,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252253521Disabling compacts and flushes for region at 1733252253521Disabling writes for close at 1733252253521Writing region close 
event to WAL at 1733252253521Closed at 1733252253521 2024-12-03T18:57:33,522 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:57:33,522 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T18:57:33,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T18:57:33,524 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:57:33,525 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T18:57:33,585 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(746): ClusterId : ca1dad72-2a88-4428-a680-fcc535c8a12c 2024-12-03T18:57:33,585 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T18:57:33,598 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T18:57:33,598 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T18:57:33,611 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T18:57:33,611 DEBUG [RS:0;db5a5ccf5be8:38091 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20b61007, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:57:33,626 DEBUG [RS:0;db5a5ccf5be8:38091 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db5a5ccf5be8:38091 2024-12-03T18:57:33,626 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T18:57:33,626 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T18:57:33,626 DEBUG [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T18:57:33,627 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(2659): reportForDuty to master=db5a5ccf5be8,34991,1733252252995 with port=38091, startcode=1733252253164 2024-12-03T18:57:33,627 DEBUG [RS:0;db5a5ccf5be8:38091 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T18:57:33,629 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51953, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T18:57:33,629 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34991 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:33,629 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34991 {}] master.ServerManager(517): Registering regionserver=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:33,631 DEBUG [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3 2024-12-03T18:57:33,631 DEBUG [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42457 2024-12-03T18:57:33,631 DEBUG [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T18:57:33,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:57:33,642 DEBUG [RS:0;db5a5ccf5be8:38091 {}] zookeeper.ZKUtil(111): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:33,642 WARN [RS:0;db5a5ccf5be8:38091 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:57:33,642 INFO [RS:0;db5a5ccf5be8:38091 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:57:33,642 DEBUG [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:33,642 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db5a5ccf5be8,38091,1733252253164] 2024-12-03T18:57:33,645 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T18:57:33,647 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T18:57:33,647 INFO [RS:0;db5a5ccf5be8:38091 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:57:33,647 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
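The MemStoreFlusher(131) line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Those two figures are consistent with a roughly 2.2 GB heap at the commonly documented defaults of 40% of heap for the global limit (hbase.regionserver.global.memstore.size) and 95% of that limit for the low mark; both fractions are assumptions here, only the megabyte values come from the log. A trivial check of the arithmetic:

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        double heapMb = 2200.0;                  // heap size implied by the logged limit
        double globalLimitMb = heapMb * 0.40;    // 880 MB, matches globalMemStoreLimit
        double lowMarkMb = globalLimitMb * 0.95; // 836 MB, matches globalMemStoreLimitLowMark
        System.out.printf("limit=%.0f MB, lowMark=%.0f MB%n", globalLimitMb, lowMarkMb);
      }
    }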
2024-12-03T18:57:33,647 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T18:57:33,648 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T18:57:33,648 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,648 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,648 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:57:33,649 DEBUG [RS:0;db5a5ccf5be8:38091 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:57:33,650 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T18:57:33,650 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,650 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,650 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,650 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,650 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38091,1733252253164-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:57:33,666 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T18:57:33,666 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38091,1733252253164-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,666 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:33,666 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.Replication(171): db5a5ccf5be8,38091,1733252253164 started 2024-12-03T18:57:33,675 WARN [db5a5ccf5be8:34991 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T18:57:33,683 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:57:33,683 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1482): Serving as db5a5ccf5be8,38091,1733252253164, RpcServer on db5a5ccf5be8/172.17.0.2:38091, sessionid=0x1019c8e80dc0001 2024-12-03T18:57:33,683 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T18:57:33,683 DEBUG [RS:0;db5a5ccf5be8:38091 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:33,683 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,38091,1733252253164' 2024-12-03T18:57:33,683 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T18:57:33,683 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T18:57:33,684 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T18:57:33,684 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T18:57:33,684 DEBUG [RS:0;db5a5ccf5be8:38091 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:33,684 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,38091,1733252253164' 2024-12-03T18:57:33,684 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T18:57:33,684 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T18:57:33,685 DEBUG [RS:0;db5a5ccf5be8:38091 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T18:57:33,685 INFO [RS:0;db5a5ccf5be8:38091 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T18:57:33,685 INFO [RS:0;db5a5ccf5be8:38091 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
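The RegionServerRpcQuotaManager(64) and RegionServerSpaceQuotaManager(80) lines above both report quota support off. A one-line check of the switch assumed to govern this, hbase.quota.enabled; treat the key name as an assumption to verify, while the disabled state is what the log itself shows.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaFlagSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        boolean quotasOn = conf.getBoolean("hbase.quota.enabled", false);
        System.out.println("rpc/space quotas enabled: " + quotasOn);
      }
    }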
2024-12-03T18:57:33,788 INFO [RS:0;db5a5ccf5be8:38091 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C38091%2C1733252253164, suffix=, logDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164, archiveDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/oldWALs, maxLogs=32 2024-12-03T18:57:33,788 INFO [RS:0;db5a5ccf5be8:38091 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C38091%2C1733252253164.1733252253788 2024-12-03T18:57:33,796 INFO [RS:0;db5a5ccf5be8:38091 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.1733252253788 2024-12-03T18:57:33,797 DEBUG [RS:0;db5a5ccf5be8:38091 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43093:43093),(127.0.0.1/127.0.0.1:40925:40925)] 2024-12-03T18:57:33,925 DEBUG [db5a5ccf5be8:34991 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T18:57:33,926 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:33,927 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,38091,1733252253164, state=OPENING 2024-12-03T18:57:33,936 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T18:57:33,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:57:33,947 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:57:33,947 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:57:33,947 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:57:33,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,38091,1733252253164}] 2024-12-03T18:57:34,101 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T18:57:34,103 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58919, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T18:57:34,109 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T18:57:34,109 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:57:34,112 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C38091%2C1733252253164.meta, suffix=.meta, logDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164, archiveDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/oldWALs, maxLogs=32 2024-12-03T18:57:34,112 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C38091%2C1733252253164.meta.1733252254112.meta 2024-12-03T18:57:34,118 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.meta.1733252254112.meta 2024-12-03T18:57:34,119 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43093:43093),(127.0.0.1/127.0.0.1:40925:40925)] 2024-12-03T18:57:34,124 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:57:34,124 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T18:57:34,125 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T18:57:34,125 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
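The CoprocessorHost(215) and RegionCoprocessorHost(434) entries above show MultiRowMutationEndpoint being loaded from the hbase:meta descriptor during the meta open. The sketch below attaches the same endpoint class, by name, to a hypothetical user table demo_mrm via the public builder API; it only constructs the descriptor and does not create the table.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
      public static void main(String[] args) throws Exception {
        // setCoprocessor declares IOException, hence the throws clause on main.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_mrm"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
      }
    }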
2024-12-03T18:57:34,125 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T18:57:34,125 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:57:34,125 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T18:57:34,125 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T18:57:34,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:57:34,127 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:57:34,127 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:34,127 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:57:34,127 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:57:34,128 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:57:34,128 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:34,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:57:34,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:57:34,129 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:57:34,129 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:34,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:57:34,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:57:34,130 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:57:34,130 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:34,131 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T18:57:34,131 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:57:34,132 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740 2024-12-03T18:57:34,133 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740 2024-12-03T18:57:34,134 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:57:34,134 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:57:34,134 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T18:57:34,135 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:57:34,136 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806661, jitterRate=0.025723755359649658}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:57:34,136 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T18:57:34,137 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733252254125Writing region info on filesystem at 1733252254125Initializing all the Stores at 1733252254126 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252254126Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252254126Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252254126Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252254126Cleaning up temporary data from old regions at 1733252254134 (+8 ms)Running coprocessor post-open hooks at 1733252254136 (+2 ms)Region opened successfully at 1733252254137 (+1 ms) 2024-12-03T18:57:34,138 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733252254100 2024-12-03T18:57:34,140 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T18:57:34,140 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T18:57:34,141 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:34,142 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,38091,1733252253164, state=OPEN 2024-12-03T18:57:34,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:57:34,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:57:34,220 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:34,220 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:57:34,220 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:57:34,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T18:57:34,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,38091,1733252253164 in 273 msec 2024-12-03T18:57:34,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T18:57:34,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 700 msec 2024-12-03T18:57:34,227 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:57:34,227 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T18:57:34,228 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:57:34,228 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,38091,1733252253164, seqNum=-1] 2024-12-03T18:57:34,228 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:57:34,229 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43081, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:57:34,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 747 msec 2024-12-03T18:57:34,235 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733252254235, completionTime=-1 2024-12-03T18:57:34,235 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T18:57:34,235 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733252314237 2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733252374237 2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34991,1733252252995-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34991,1733252252995-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34991,1733252252995-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db5a5ccf5be8:34991, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:57:34,237 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:34,238 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T18:57:34,239 DEBUG [master/db5a5ccf5be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T18:57:34,241 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.021sec 2024-12-03T18:57:34,241 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T18:57:34,241 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T18:57:34,241 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T18:57:34,241 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T18:57:34,241 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T18:57:34,241 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34991,1733252252995-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:57:34,242 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34991,1733252252995-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T18:57:34,244 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T18:57:34,244 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T18:57:34,244 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,34991,1733252252995-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T18:57:34,285 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@651a5cab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:57:34,285 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db5a5ccf5be8,34991,-1 for getting cluster id 2024-12-03T18:57:34,286 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T18:57:34,287 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ca1dad72-2a88-4428-a680-fcc535c8a12c' 2024-12-03T18:57:34,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T18:57:34,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ca1dad72-2a88-4428-a680-fcc535c8a12c" 2024-12-03T18:57:34,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b5783b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:57:34,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db5a5ccf5be8,34991,-1] 2024-12-03T18:57:34,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T18:57:34,288 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:57:34,288 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54554, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T18:57:34,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a53521, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:57:34,290 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:57:34,290 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,38091,1733252253164, seqNum=-1] 2024-12-03T18:57:34,291 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:57:34,291 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:57:34,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:34,293 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:57:34,295 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T18:57:34,295 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T18:57:34,296 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is db5a5ccf5be8,34991,1733252252995 2024-12-03T18:57:34,296 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f98ea4a 2024-12-03T18:57:34,296 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T18:57:34,297 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54558, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T18:57:34,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34991 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T18:57:34,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34991 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-03T18:57:34,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34991 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:57:34,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34991 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-03T18:57:34,300 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T18:57:34,300 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:34,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34991 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-03T18:57:34,301 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T18:57:34,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:57:34,306 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741835_1011 (size=381) 2024-12-03T18:57:34,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741835_1011 (size=381) 2024-12-03T18:57:34,308 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 549af462e760befc589617aa40c27d98, NAME => 'TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3 2024-12-03T18:57:34,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741836_1012 (size=64) 2024-12-03T18:57:34,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741836_1012 (size=64) 2024-12-03T18:57:34,315 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:57:34,315 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 549af462e760befc589617aa40c27d98, disabling compactions & flushes 2024-12-03T18:57:34,315 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:34,315 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:34,315 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. after waiting 0 ms 2024-12-03T18:57:34,315 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:34,315 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 
2024-12-03T18:57:34,315 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 549af462e760befc589617aa40c27d98: Waiting for close lock at 1733252254315Disabling compacts and flushes for region at 1733252254315Disabling writes for close at 1733252254315Writing region close event to WAL at 1733252254315Closed at 1733252254315 2024-12-03T18:57:34,316 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T18:57:34,317 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733252254316"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733252254316"}]},"ts":"1733252254316"} 2024-12-03T18:57:34,319 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-03T18:57:34,320 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T18:57:34,320 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252254320"}]},"ts":"1733252254320"} 2024-12-03T18:57:34,322 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-03T18:57:34,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, ASSIGN}] 2024-12-03T18:57:34,324 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, ASSIGN 2024-12-03T18:57:34,325 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, ASSIGN; state=OFFLINE, location=db5a5ccf5be8,38091,1733252253164; forceNewPlan=false, retain=false 2024-12-03T18:57:34,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:34,476 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=549af462e760befc589617aa40c27d98, regionState=OPENING, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:34,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:34,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, ASSIGN because future has completed 2024-12-03T18:57:34,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164}] 2024-12-03T18:57:34,642 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:34,643 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 549af462e760befc589617aa40c27d98, NAME => 'TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:57:34,643 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,643 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:57:34,643 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,643 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,644 INFO [StoreOpener-549af462e760befc589617aa40c27d98-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,645 INFO [StoreOpener-549af462e760befc589617aa40c27d98-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 549af462e760befc589617aa40c27d98 columnFamilyName info 2024-12-03T18:57:34,645 DEBUG [StoreOpener-549af462e760befc589617aa40c27d98-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:34,646 INFO [StoreOpener-549af462e760befc589617aa40c27d98-1 {}] regionserver.HStore(327): Store=549af462e760befc589617aa40c27d98/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:57:34,646 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,647 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,647 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,647 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,647 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,649 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,651 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:57:34,651 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 549af462e760befc589617aa40c27d98; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769544, jitterRate=-0.021475091576576233}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T18:57:34,651 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
549af462e760befc589617aa40c27d98 2024-12-03T18:57:34,651 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 549af462e760befc589617aa40c27d98: Running coprocessor pre-open hook at 1733252254643Writing region info on filesystem at 1733252254643Initializing all the Stores at 1733252254644 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252254644Cleaning up temporary data from old regions at 1733252254647 (+3 ms)Running coprocessor post-open hooks at 1733252254651 (+4 ms)Region opened successfully at 1733252254651 2024-12-03T18:57:34,653 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., pid=6, masterSystemTime=1733252254639 2024-12-03T18:57:34,655 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:34,655 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:34,655 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=549af462e760befc589617aa40c27d98, regionState=OPEN, openSeqNum=2, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:34,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 because future has completed 2024-12-03T18:57:34,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T18:57:34,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 in 176 msec 2024-12-03T18:57:34,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T18:57:34,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, ASSIGN in 337 msec 2024-12-03T18:57:34,663 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T18:57:34,663 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733252254663"}]},"ts":"1733252254663"} 2024-12-03T18:57:34,666 INFO 
[PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-03T18:57:34,667 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T18:57:34,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 369 msec 2024-12-03T18:57:34,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:34,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:35,451 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:57:35,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:35,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:36,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:36,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:37,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:37,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:37,876 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-03T18:57:37,876 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-03T18:57:37,877 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T18:57:38,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:38,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:39,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:39,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:39,646 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T18:57:39,646 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-03T18:57:40,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:40,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:41,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:41,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:42,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:42,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:43,383 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:57:43,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:57:43,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:43,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:44,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34991 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T18:57:44,391 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-03T18:57:44,391 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-03T18:57:44,398 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-03T18:57:44,398 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:44,403 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2] 2024-12-03T18:57:44,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 549af462e760befc589617aa40c27d98 2024-12-03T18:57:44,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549af462e760befc589617aa40c27d98 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:57:44,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/577f5c5208f94d8ab526a3e7d9d5e816 is 1080, key is row0001/info:/1733252264404/Put/seqid=0 2024-12-03T18:57:44,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:44,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T18:57:44,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:60092 deadline: 1733252274467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:44,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741837_1013 (size=12509) 2024-12-03T18:57:44,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741837_1013 (size=12509) 2024-12-03T18:57:44,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/577f5c5208f94d8ab526a3e7d9d5e816 2024-12-03T18:57:44,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/577f5c5208f94d8ab526a3e7d9d5e816 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/577f5c5208f94d8ab526a3e7d9d5e816 2024-12-03T18:57:44,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/577f5c5208f94d8ab526a3e7d9d5e816, entries=7, sequenceid=11, filesize=12.2 K 2024-12-03T18:57:44,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:44,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 549af462e760befc589617aa40c27d98 in 66ms, sequenceid=11, compaction requested=false 2024-12-03T18:57:44,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549af462e760befc589617aa40c27d98: 2024-12-03T18:57:44,490 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T18:57:44,490 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T18:57:44,490 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 because the exception is null or not 
the one we care about 2024-12-03T18:57:45,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:45,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T18:57:46,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T18:57:46,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:47,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:47,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:48,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:48,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:49,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:49,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:50,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:50,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:51,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:51,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:52,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:52,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:53,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:53,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:54,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:54,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:54,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 549af462e760befc589617aa40c27d98
2024-12-03T18:57:54,510 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549af462e760befc589617aa40c27d98 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-03T18:57:54,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/2b8ad4350c2e4746add3d3d52cdb29b0 is 1080, key is row0008/info:/1733252264423/Put/seqid=0
2024-12-03T18:57:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741838_1014 (size=29761)
2024-12-03T18:57:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741838_1014 (size=29761)
2024-12-03T18:57:54,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/2b8ad4350c2e4746add3d3d52cdb29b0
2024-12-03T18:57:54,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/2b8ad4350c2e4746add3d3d52cdb29b0 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0
2024-12-03T18:57:54,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0, entries=23, sequenceid=37, filesize=29.1 K
2024-12-03T18:57:54,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 549af462e760befc589617aa40c27d98 in 27ms, sequenceid=37, compaction requested=false
2024-12-03T18:57:54,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549af462e760befc589617aa40c27d98:
2024-12-03T18:57:54,538 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-12-03T18:57:54,538 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T18:57:54,538 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0 because midkey is the same as first or last row
2024-12-03T18:57:55,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:55,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:56,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:56,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
2024-12-03T18:57:56,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 549af462e760befc589617aa40c27d98
2024-12-03T18:57:56,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549af462e760befc589617aa40c27d98 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T18:57:56,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/09cb524a8f814f6aa9b738380c4a5a33 is 1080, key is row0031/info:/1733252274512/Put/seqid=0
2024-12-03T18:57:56,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741839_1015 (size=12509)
2024-12-03T18:57:56,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741839_1015 (size=12509)
2024-12-03T18:57:56,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/09cb524a8f814f6aa9b738380c4a5a33
2024-12-03T18:57:56,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/09cb524a8f814f6aa9b738380c4a5a33 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/09cb524a8f814f6aa9b738380c4a5a33
2024-12-03T18:57:56,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/09cb524a8f814f6aa9b738380c4a5a33, entries=7, sequenceid=47, filesize=12.2 K
2024-12-03T18:57:56,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8608 for 549af462e760befc589617aa40c27d98 in 30ms, sequenceid=47, compaction requested=true
2024-12-03T18:57:56,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549af462e760befc589617aa40c27d98:
2024-12-03T18:57:56,557 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K
2024-12-03T18:57:56,557 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T18:57:56,557 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0 because midkey is the same as first or last row
2024-12-03T18:57:56,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 549af462e760befc589617aa40c27d98:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T18:57:56,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T18:57:56,558 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T18:57:56,561 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T18:57:56,561 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 549af462e760befc589617aa40c27d98/info is initiating minor compaction (all files)
2024-12-03T18:57:56,561 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549af462e760befc589617aa40c27d98/info in TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.
2024-12-03T18:57:56,561 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/577f5c5208f94d8ab526a3e7d9d5e816, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/09cb524a8f814f6aa9b738380c4a5a33] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp, totalSize=53.5 K
2024-12-03T18:57:56,562 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 577f5c5208f94d8ab526a3e7d9d5e816, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733252264404
2024-12-03T18:57:56,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 549af462e760befc589617aa40c27d98
2024-12-03T18:57:56,562 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2b8ad4350c2e4746add3d3d52cdb29b0, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733252264423
2024-12-03T18:57:56,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549af462e760befc589617aa40c27d98 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-03T18:57:56,563 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 09cb524a8f814f6aa9b738380c4a5a33, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733252274512
2024-12-03T18:57:56,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/0b9e91e0b5eb4f929e288fe9de7b4282 is 1080, key is row0038/info:/1733252276529/Put/seqid=0
2024-12-03T18:57:56,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741840_1016 (size=14663)
2024-12-03T18:57:56,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741840_1016 (size=14663)
2024-12-03T18:57:56,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/0b9e91e0b5eb4f929e288fe9de7b4282
2024-12-03T18:57:56,596 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549af462e760befc589617aa40c27d98#info#compaction#60 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T18:57:56,597 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/c7657e9567954c9d8a233b8f27033220 is 1080, key is row0001/info:/1733252264404/Put/seqid=0
2024-12-03T18:57:56,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/0b9e91e0b5eb4f929e288fe9de7b4282 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/0b9e91e0b5eb4f929e288fe9de7b4282
2024-12-03T18:57:56,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741841_1017 (size=44978)
2024-12-03T18:57:56,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741841_1017 (size=44978)
2024-12-03T18:57:56,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/0b9e91e0b5eb4f929e288fe9de7b4282, entries=9, sequenceid=59, filesize=14.3 K
2024-12-03T18:57:56,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=18.91 KB/19368 for 549af462e760befc589617aa40c27d98 in 47ms, sequenceid=59, compaction requested=false
2024-12-03T18:57:56,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549af462e760befc589617aa40c27d98:
2024-12-03T18:57:56,609 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.8 K, sizeToCheck=16.0 K
2024-12-03T18:57:56,609 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T18:57:56,609 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0 because midkey is the same as first or last row
2024-12-03T18:57:56,612 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/c7657e9567954c9d8a233b8f27033220 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220
2024-12-03T18:57:56,618 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 549af462e760befc589617aa40c27d98/info of 549af462e760befc589617aa40c27d98 into c7657e9567954c9d8a233b8f27033220(size=43.9 K), total size for store is 58.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T18:57:56,618 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549af462e760befc589617aa40c27d98:
2024-12-03T18:57:56,618 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., storeName=549af462e760befc589617aa40c27d98/info, priority=13, startTime=1733252276557; duration=0sec
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=58.2 K, sizeToCheck=16.0 K
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220 because midkey is the same as first or last row
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=58.2 K, sizeToCheck=16.0 K
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220 because midkey is the same as first or last row
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=58.2 K, sizeToCheck=16.0 K
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220 because midkey is the same as first or last row
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T18:57:56,619 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549af462e760befc589617aa40c27d98:info
2024-12-03T18:57:57,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
2024-12-03T18:57:57,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:58,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:58,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:58,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 549af462e760befc589617aa40c27d98 2024-12-03T18:57:58,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549af462e760befc589617aa40c27d98 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-12-03T18:57:58,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/660ae6892c2a4bf2bf60f38f0ff84b0a is 1080, key is row0047/info:/1733252276564/Put/seqid=0 2024-12-03T18:57:58,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741842_1018 (size=25453) 2024-12-03T18:57:58,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741842_1018 (size=25453) 2024-12-03T18:57:58,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/660ae6892c2a4bf2bf60f38f0ff84b0a 2024-12-03T18:57:58,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/660ae6892c2a4bf2bf60f38f0ff84b0a as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/660ae6892c2a4bf2bf60f38f0ff84b0a 2024-12-03T18:57:58,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T18:57:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:60092 deadline: 1733252288635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:58,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/660ae6892c2a4bf2bf60f38f0ff84b0a, entries=19, sequenceid=82, filesize=24.9 K 2024-12-03T18:57:58,636 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T18:57:58,636 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T18:57:58,636 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 because the exception is null or not the one we care about 2024-12-03T18:57:58,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=10.51 KB/10760 for 549af462e760befc589617aa40c27d98 in 27ms, sequenceid=82, compaction requested=true 2024-12-03T18:57:58,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549af462e760befc589617aa40c27d98: 2024-12-03T18:57:58,637 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-03T18:57:58,637 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:57:58,637 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220 because midkey is the same as first or last row 2024-12-03T18:57:58,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 549af462e760befc589617aa40c27d98:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:57:58,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:57:58,637 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:57:58,638 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:57:58,638 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 549af462e760befc589617aa40c27d98/info is initiating minor compaction (all files) 2024-12-03T18:57:58,638 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549af462e760befc589617aa40c27d98/info in TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 
2024-12-03T18:57:58,638 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/0b9e91e0b5eb4f929e288fe9de7b4282, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/660ae6892c2a4bf2bf60f38f0ff84b0a] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp, totalSize=83.1 K 2024-12-03T18:57:58,638 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting c7657e9567954c9d8a233b8f27033220, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733252264404 2024-12-03T18:57:58,639 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0b9e91e0b5eb4f929e288fe9de7b4282, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1733252276529 2024-12-03T18:57:58,639 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 660ae6892c2a4bf2bf60f38f0ff84b0a, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733252276564 2024-12-03T18:57:58,649 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549af462e760befc589617aa40c27d98#info#compaction#62 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:57:58,650 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/8047aa4aba364985a72541625f874069 is 1080, key is row0001/info:/1733252264404/Put/seqid=0 2024-12-03T18:57:58,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741843_1019 (size=75378) 2024-12-03T18:57:58,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741843_1019 (size=75378) 2024-12-03T18:57:58,661 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/8047aa4aba364985a72541625f874069 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069 2024-12-03T18:57:58,667 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 549af462e760befc589617aa40c27d98/info of 549af462e760befc589617aa40c27d98 into 8047aa4aba364985a72541625f874069(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:57:58,667 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549af462e760befc589617aa40c27d98: 2024-12-03T18:57:58,667 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., storeName=549af462e760befc589617aa40c27d98/info, priority=13, startTime=1733252278637; duration=0sec 2024-12-03T18:57:58,667 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-03T18:57:58,668 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:57:58,668 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-03T18:57:58,668 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:57:58,668 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-03T18:57:58,668 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T18:57:58,669 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:57:58,669 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:57:58,669 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549af462e760befc589617aa40c27d98:info 2024-12-03T18:57:58,670 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34991 {}] assignment.AssignmentManager(1363): Split request from db5a5ccf5be8,38091,1733252253164, parent={ENCODED => 549af462e760befc589617aa40c27d98, NAME => 'TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-03T18:57:58,676 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34991 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:58,680 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34991 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=549af462e760befc589617aa40c27d98, daughterA=41df4623a1d17f001bcd00dd80ddad45, daughterB=84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:58,681 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=549af462e760befc589617aa40c27d98, daughterA=41df4623a1d17f001bcd00dd80ddad45, daughterB=84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:58,681 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=549af462e760befc589617aa40c27d98, daughterA=41df4623a1d17f001bcd00dd80ddad45, daughterB=84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:58,681 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=549af462e760befc589617aa40c27d98, daughterA=41df4623a1d17f001bcd00dd80ddad45, daughterB=84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:58,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, UNASSIGN}] 2024-12-03T18:57:58,689 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, UNASSIGN 2024-12-03T18:57:58,690 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=549af462e760befc589617aa40c27d98, regionState=CLOSING, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:58,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, UNASSIGN because future has completed 2024-12-03T18:57:58,693 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-03T18:57:58,693 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164}] 2024-12-03T18:57:58,850 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 549af462e760befc589617aa40c27d98 2024-12-03T18:57:58,850 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-03T18:57:58,850 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 549af462e760befc589617aa40c27d98, disabling compactions & flushes 2024-12-03T18:57:58,851 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:58,851 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:58,851 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. after waiting 0 ms 2024-12-03T18:57:58,851 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 
2024-12-03T18:57:58,851 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 549af462e760befc589617aa40c27d98 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-03T18:57:58,855 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/50b24cd1da3e4ea284893a0af659fbae is 1080, key is row0066/info:/1733252278611/Put/seqid=0 2024-12-03T18:57:58,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741844_1020 (size=15740) 2024-12-03T18:57:58,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741844_1020 (size=15740) 2024-12-03T18:57:58,860 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/50b24cd1da3e4ea284893a0af659fbae 2024-12-03T18:57:58,865 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/.tmp/info/50b24cd1da3e4ea284893a0af659fbae as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/50b24cd1da3e4ea284893a0af659fbae 2024-12-03T18:57:58,871 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/50b24cd1da3e4ea284893a0af659fbae, entries=10, sequenceid=96, filesize=15.4 K 2024-12-03T18:57:58,872 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 549af462e760befc589617aa40c27d98 in 20ms, sequenceid=96, compaction requested=false 2024-12-03T18:57:58,873 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/577f5c5208f94d8ab526a3e7d9d5e816, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220, 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/09cb524a8f814f6aa9b738380c4a5a33, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/0b9e91e0b5eb4f929e288fe9de7b4282, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/660ae6892c2a4bf2bf60f38f0ff84b0a] to archive 2024-12-03T18:57:58,873 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T18:57:58,875 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/577f5c5208f94d8ab526a3e7d9d5e816 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/577f5c5208f94d8ab526a3e7d9d5e816 2024-12-03T18:57:58,876 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/2b8ad4350c2e4746add3d3d52cdb29b0 2024-12-03T18:57:58,877 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/c7657e9567954c9d8a233b8f27033220 2024-12-03T18:57:58,878 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/09cb524a8f814f6aa9b738380c4a5a33 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/09cb524a8f814f6aa9b738380c4a5a33 2024-12-03T18:57:58,879 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/0b9e91e0b5eb4f929e288fe9de7b4282 to 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/0b9e91e0b5eb4f929e288fe9de7b4282 2024-12-03T18:57:58,880 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/660ae6892c2a4bf2bf60f38f0ff84b0a to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/660ae6892c2a4bf2bf60f38f0ff84b0a 2024-12-03T18:57:58,886 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/recovered.edits/99.seqid, newMaxSeqId=99, maxSeqId=1 2024-12-03T18:57:58,886 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 2024-12-03T18:57:58,886 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 549af462e760befc589617aa40c27d98: Waiting for close lock at 1733252278850Running coprocessor pre-close hooks at 1733252278850Disabling compacts and flushes for region at 1733252278850Disabling writes for close at 1733252278851 (+1 ms)Obtaining lock to block concurrent updates at 1733252278851Preparing flush snapshotting stores in 549af462e760befc589617aa40c27d98 at 1733252278851Finished memstore snapshotting TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., syncing WAL and waiting on mvcc, flushsize=dataSize=10760, getHeapSize=11760, getOffHeapSize=0, getCellsCount=10 at 1733252278851Flushing stores of TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 
at 1733252278852 (+1 ms)Flushing 549af462e760befc589617aa40c27d98/info: creating writer at 1733252278852Flushing 549af462e760befc589617aa40c27d98/info: appending metadata at 1733252278854 (+2 ms)Flushing 549af462e760befc589617aa40c27d98/info: closing flushed file at 1733252278854Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c1fc35c: reopening flushed file at 1733252278864 (+10 ms)Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 549af462e760befc589617aa40c27d98 in 20ms, sequenceid=96, compaction requested=false at 1733252278872 (+8 ms)Writing region close event to WAL at 1733252278882 (+10 ms)Running coprocessor post-close hooks at 1733252278886 (+4 ms)Closed at 1733252278886 2024-12-03T18:57:58,888 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 549af462e760befc589617aa40c27d98 2024-12-03T18:57:58,889 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=549af462e760befc589617aa40c27d98, regionState=CLOSED 2024-12-03T18:57:58,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 because future has completed 2024-12-03T18:57:58,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-03T18:57:58,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 549af462e760befc589617aa40c27d98, server=db5a5ccf5be8,38091,1733252253164 in 199 msec 2024-12-03T18:57:58,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T18:57:58,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549af462e760befc589617aa40c27d98, UNASSIGN in 207 msec 2024-12-03T18:57:58,904 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:58,907 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=549af462e760befc589617aa40c27d98, threads=2 2024-12-03T18:57:58,909 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/50b24cd1da3e4ea284893a0af659fbae for region: 549af462e760befc589617aa40c27d98 2024-12-03T18:57:58,909 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069 for region: 549af462e760befc589617aa40c27d98 2024-12-03T18:57:58,918 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/50b24cd1da3e4ea284893a0af659fbae, top=true 2024-12-03T18:57:58,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741845_1021 (size=27) 2024-12-03T18:57:58,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741845_1021 (size=27) 2024-12-03T18:57:58,923 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/TestLogRolling-testLogRolling=549af462e760befc589617aa40c27d98-50b24cd1da3e4ea284893a0af659fbae for child: 84dd73c54d97c54fee00ffcbd27bacdc, parent: 549af462e760befc589617aa40c27d98 2024-12-03T18:57:58,924 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/50b24cd1da3e4ea284893a0af659fbae for region: 549af462e760befc589617aa40c27d98 2024-12-03T18:57:58,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741846_1022 (size=27) 2024-12-03T18:57:58,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741846_1022 (size=27) 2024-12-03T18:57:58,932 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069 for region: 549af462e760befc589617aa40c27d98 2024-12-03T18:57:58,934 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 549af462e760befc589617aa40c27d98 Daughter A: [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98] storefiles, Daughter B: [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/TestLogRolling-testLogRolling=549af462e760befc589617aa40c27d98-50b24cd1da3e4ea284893a0af659fbae, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98] storefiles. 
2024-12-03T18:57:58,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741847_1023 (size=71) 2024-12-03T18:57:58,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741847_1023 (size=71) 2024-12-03T18:57:58,945 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:58,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741848_1024 (size=71) 2024-12-03T18:57:58,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741848_1024 (size=71) 2024-12-03T18:57:58,961 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:58,970 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/recovered.edits/99.seqid, newMaxSeqId=99, maxSeqId=-1 2024-12-03T18:57:58,972 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/recovered.edits/99.seqid, newMaxSeqId=99, maxSeqId=-1 2024-12-03T18:57:58,974 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733252278974"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733252278974"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733252278974"}]},"ts":"1733252278974"} 2024-12-03T18:57:58,975 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733252278974"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733252278974"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733252278974"}]},"ts":"1733252278974"} 2024-12-03T18:57:58,975 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733252278974"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733252278974"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733252278974"}]},"ts":"1733252278974"} 2024-12-03T18:57:58,992 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41df4623a1d17f001bcd00dd80ddad45, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=84dd73c54d97c54fee00ffcbd27bacdc, ASSIGN}] 2024-12-03T18:57:58,993 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41df4623a1d17f001bcd00dd80ddad45, ASSIGN 2024-12-03T18:57:58,993 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=84dd73c54d97c54fee00ffcbd27bacdc, ASSIGN 2024-12-03T18:57:58,994 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41df4623a1d17f001bcd00dd80ddad45, ASSIGN; state=SPLITTING_NEW, location=db5a5ccf5be8,38091,1733252253164; forceNewPlan=false, retain=false 2024-12-03T18:57:58,994 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=84dd73c54d97c54fee00ffcbd27bacdc, ASSIGN; state=SPLITTING_NEW, location=db5a5ccf5be8,38091,1733252253164; forceNewPlan=false, retain=false 2024-12-03T18:57:59,145 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=84dd73c54d97c54fee00ffcbd27bacdc, regionState=OPENING, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:59,145 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=41df4623a1d17f001bcd00dd80ddad45, regionState=OPENING, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:59,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=84dd73c54d97c54fee00ffcbd27bacdc, ASSIGN because future has completed 2024-12-03T18:57:59,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84dd73c54d97c54fee00ffcbd27bacdc, server=db5a5ccf5be8,38091,1733252253164}] 2024-12-03T18:57:59,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41df4623a1d17f001bcd00dd80ddad45, ASSIGN because future has completed 2024-12-03T18:57:59,150 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 41df4623a1d17f001bcd00dd80ddad45, server=db5a5ccf5be8,38091,1733252253164}] 2024-12-03T18:57:59,304 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 
2024-12-03T18:57:59,305 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 41df4623a1d17f001bcd00dd80ddad45, NAME => 'TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-03T18:57:59,305 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,305 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:57:59,305 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,305 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,306 INFO [StoreOpener-41df4623a1d17f001bcd00dd80ddad45-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,307 INFO [StoreOpener-41df4623a1d17f001bcd00dd80ddad45-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41df4623a1d17f001bcd00dd80ddad45 columnFamilyName info 2024-12-03T18:57:59,307 DEBUG [StoreOpener-41df4623a1d17f001bcd00dd80ddad45-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:59,320 DEBUG [StoreOpener-41df4623a1d17f001bcd00dd80ddad45-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98->hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069-bottom 2024-12-03T18:57:59,320 INFO [StoreOpener-41df4623a1d17f001bcd00dd80ddad45-1 {}] regionserver.HStore(327): Store=41df4623a1d17f001bcd00dd80ddad45/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:57:59,321 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,321 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,322 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,323 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,323 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,325 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,326 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 41df4623a1d17f001bcd00dd80ddad45; next sequenceid=100; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821369, jitterRate=0.04442492127418518}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T18:57:59,326 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:57:59,326 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 41df4623a1d17f001bcd00dd80ddad45: Running coprocessor pre-open hook at 1733252279305Writing region info on filesystem at 1733252279305Initializing all the Stores at 1733252279306 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252279306Cleaning up temporary data from old regions at 1733252279323 (+17 ms)Running coprocessor post-open hooks at 1733252279326 (+3 ms)Region opened successfully at 1733252279326 2024-12-03T18:57:59,327 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45., pid=13, masterSystemTime=1733252279301 2024-12-03T18:57:59,328 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 
41df4623a1d17f001bcd00dd80ddad45:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:57:59,328 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-03T18:57:59,328 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:57:59,328 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:57:59,328 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 41df4623a1d17f001bcd00dd80ddad45/info is initiating minor compaction (all files) 2024-12-03T18:57:59,328 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41df4623a1d17f001bcd00dd80ddad45/info in TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:57:59,329 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98->hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069-bottom] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/.tmp, totalSize=73.6 K 2024-12-03T18:57:59,329 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733252264404 2024-12-03T18:57:59,330 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:57:59,330 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:57:59,330 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 
2024-12-03T18:57:59,330 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 84dd73c54d97c54fee00ffcbd27bacdc, NAME => 'TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-03T18:57:59,330 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,331 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:57:59,331 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,331 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,331 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=41df4623a1d17f001bcd00dd80ddad45, regionState=OPEN, openSeqNum=100, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:59,332 INFO [StoreOpener-84dd73c54d97c54fee00ffcbd27bacdc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,332 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-03T18:57:59,333 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-03T18:57:59,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-12-03T18:57:59,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 41df4623a1d17f001bcd00dd80ddad45, server=db5a5ccf5be8,38091,1733252253164 because future has completed 2024-12-03T18:57:59,333 INFO [StoreOpener-84dd73c54d97c54fee00ffcbd27bacdc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 84dd73c54d97c54fee00ffcbd27bacdc columnFamilyName info 2024-12-03T18:57:59,333 DEBUG [StoreOpener-84dd73c54d97c54fee00ffcbd27bacdc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:57:59,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-03T18:57:59,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 41df4623a1d17f001bcd00dd80ddad45, server=db5a5ccf5be8,38091,1733252253164 in 184 msec 2024-12-03T18:57:59,350 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41df4623a1d17f001bcd00dd80ddad45#info#compaction#64 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:57:59,351 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/.tmp/info/737907e3c4a74821a1ae3e3e508e41f1 is 1080, key is row0001/info:/1733252264404/Put/seqid=0 2024-12-03T18:57:59,351 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41df4623a1d17f001bcd00dd80ddad45, ASSIGN in 358 msec 2024-12-03T18:57:59,358 DEBUG [StoreOpener-84dd73c54d97c54fee00ffcbd27bacdc-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98->hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069-top 2024-12-03T18:57:59,364 DEBUG [StoreOpener-84dd73c54d97c54fee00ffcbd27bacdc-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/TestLogRolling-testLogRolling=549af462e760befc589617aa40c27d98-50b24cd1da3e4ea284893a0af659fbae 2024-12-03T18:57:59,364 INFO [StoreOpener-84dd73c54d97c54fee00ffcbd27bacdc-1 {}] regionserver.HStore(327): Store=84dd73c54d97c54fee00ffcbd27bacdc/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:57:59,365 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,365 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/info/05ee060b8a1e4a3581553ba23510d8b5 is 193, key is TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc./info:regioninfo/1733252279145/Put/seqid=0 2024-12-03T18:57:59,367 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,367 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,367 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] 
regionserver.HRegion(1060): Cleaning up temporary data for 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,369 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,370 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 84dd73c54d97c54fee00ffcbd27bacdc; next sequenceid=100; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785200, jitterRate=-0.0015669465065002441}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T18:57:59,370 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:57:59,370 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 84dd73c54d97c54fee00ffcbd27bacdc: Running coprocessor pre-open hook at 1733252279331Writing region info on filesystem at 1733252279331Initializing all the Stores at 1733252279332 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252279332Cleaning up temporary data from old regions at 1733252279367 (+35 ms)Running coprocessor post-open hooks at 1733252279370 (+3 ms)Region opened successfully at 1733252279370 2024-12-03T18:57:59,371 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., pid=12, masterSystemTime=1733252279301 2024-12-03T18:57:59,371 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 84dd73c54d97c54fee00ffcbd27bacdc:info, priority=-2147483648, current under compaction store size is 2 2024-12-03T18:57:59,371 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:57:59,371 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-03T18:57:59,373 INFO [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 
2024-12-03T18:57:59,373 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.HStore(1541): 84dd73c54d97c54fee00ffcbd27bacdc/info is initiating minor compaction (all files) 2024-12-03T18:57:59,373 INFO [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 84dd73c54d97c54fee00ffcbd27bacdc/info in TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:57:59,373 INFO [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98->hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069-top, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/TestLogRolling-testLogRolling=549af462e760befc589617aa40c27d98-50b24cd1da3e4ea284893a0af659fbae] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp, totalSize=89.0 K 2024-12-03T18:57:59,374 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] compactions.Compactor(225): Compacting 8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733252264404 2024-12-03T18:57:59,374 DEBUG [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:57:59,374 INFO [RS_OPEN_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 
2024-12-03T18:57:59,374 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=549af462e760befc589617aa40c27d98-50b24cd1da3e4ea284893a0af659fbae, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733252278611 2024-12-03T18:57:59,375 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=84dd73c54d97c54fee00ffcbd27bacdc, regionState=OPEN, openSeqNum=100, regionLocation=db5a5ccf5be8,38091,1733252253164 2024-12-03T18:57:59,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84dd73c54d97c54fee00ffcbd27bacdc, server=db5a5ccf5be8,38091,1733252253164 because future has completed 2024-12-03T18:57:59,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741849_1025 (size=70862) 2024-12-03T18:57:59,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741849_1025 (size=70862) 2024-12-03T18:57:59,386 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/.tmp/info/737907e3c4a74821a1ae3e3e508e41f1 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/info/737907e3c4a74821a1ae3e3e508e41f1 2024-12-03T18:57:59,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-03T18:57:59,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 84dd73c54d97c54fee00ffcbd27bacdc, server=db5a5ccf5be8,38091,1733252253164 in 237 msec 2024-12-03T18:57:59,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-12-03T18:57:59,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=84dd73c54d97c54fee00ffcbd27bacdc, ASSIGN in 396 msec 2024-12-03T18:57:59,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=549af462e760befc589617aa40c27d98, daughterA=41df4623a1d17f001bcd00dd80ddad45, daughterB=84dd73c54d97c54fee00ffcbd27bacdc in 715 msec 2024-12-03T18:57:59,396 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 41df4623a1d17f001bcd00dd80ddad45/info of 41df4623a1d17f001bcd00dd80ddad45 into 737907e3c4a74821a1ae3e3e508e41f1(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T18:57:59,396 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41df4623a1d17f001bcd00dd80ddad45: 2024-12-03T18:57:59,396 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45., storeName=41df4623a1d17f001bcd00dd80ddad45/info, priority=15, startTime=1733252279327; duration=0sec 2024-12-03T18:57:59,396 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:57:59,396 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41df4623a1d17f001bcd00dd80ddad45:info 2024-12-03T18:57:59,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741850_1026 (size=9847) 2024-12-03T18:57:59,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741850_1026 (size=9847) 2024-12-03T18:57:59,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/info/05ee060b8a1e4a3581553ba23510d8b5 2024-12-03T18:57:59,407 INFO [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 84dd73c54d97c54fee00ffcbd27bacdc#info#compaction#66 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:57:59,407 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/c408df797d3243eaa1b720cae687e99e is 1080, key is row0062/info:/1733252276598/Put/seqid=0 2024-12-03T18:57:59,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/ns/eeb666bfe74e416382b0e777cc5def19 is 43, key is default/ns:d/1733252254230/Put/seqid=0 2024-12-03T18:57:59,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:57:59,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741851_1027 (size=20230) 2024-12-03T18:57:59,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741851_1027 (size=20230) 2024-12-03T18:57:59,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741852_1028 (size=5153) 2024-12-03T18:57:59,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741852_1028 (size=5153) 2024-12-03T18:57:59,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/ns/eeb666bfe74e416382b0e777cc5def19 2024-12-03T18:57:59,475 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/c408df797d3243eaa1b720cae687e99e as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/c408df797d3243eaa1b720cae687e99e 2024-12-03T18:57:59,483 INFO [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in 84dd73c54d97c54fee00ffcbd27bacdc/info of 84dd73c54d97c54fee00ffcbd27bacdc into c408df797d3243eaa1b720cae687e99e(size=19.8 K), total size for store is 
19.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:57:59,483 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:57:59,483 INFO [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., storeName=84dd73c54d97c54fee00ffcbd27bacdc/info, priority=14, startTime=1733252279371; duration=0sec 2024-12-03T18:57:59,483 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:57:59,483 DEBUG [RS:0;db5a5ccf5be8:38091-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 84dd73c54d97c54fee00ffcbd27bacdc:info 2024-12-03T18:57:59,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:57:59,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/table/d074efae88fc4d519f4e43313e77f453 is 65, key is TestLogRolling-testLogRolling/table:state/1733252254663/Put/seqid=0 2024-12-03T18:57:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741853_1029 (size=5340) 2024-12-03T18:57:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741853_1029 (size=5340) 2024-12-03T18:57:59,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/table/d074efae88fc4d519f4e43313e77f453 2024-12-03T18:57:59,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/info/05ee060b8a1e4a3581553ba23510d8b5 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/info/05ee060b8a1e4a3581553ba23510d8b5 2024-12-03T18:57:59,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/info/05ee060b8a1e4a3581553ba23510d8b5, entries=30, sequenceid=17, filesize=9.6 K 2024-12-03T18:57:59,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/ns/eeb666bfe74e416382b0e777cc5def19 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/ns/eeb666bfe74e416382b0e777cc5def19 2024-12-03T18:57:59,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/ns/eeb666bfe74e416382b0e777cc5def19, entries=2, sequenceid=17, filesize=5.0 K 2024-12-03T18:57:59,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/table/d074efae88fc4d519f4e43313e77f453 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/table/d074efae88fc4d519f4e43313e77f453 2024-12-03T18:57:59,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/table/d074efae88fc4d519f4e43313e77f453, entries=2, sequenceid=17, filesize=5.2 K 2024-12-03T18:57:59,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 193ms, sequenceid=17, compaction requested=false 2024-12-03T18:57:59,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T18:58:00,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:00,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:01,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:01,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:02,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:02,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:02,974 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T18:58:03,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:03,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:03,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,887 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,888 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,888 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:03,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,418 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:58:04,419 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,421 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,421 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,422 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:04,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:04,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:05,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:05,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:06,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:06,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:07,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:07,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:08,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:08,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:08,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:60092 deadline: 1733252298649, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. is not online on db5a5ccf5be8,38091,1733252253164 2024-12-03T18:58:08,650 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. is not online on db5a5ccf5be8,38091,1733252253164 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T18:58:08,650 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98. 
is not online on db5a5ccf5be8,38091,1733252253164 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T18:58:08,650 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733252254297.549af462e760befc589617aa40c27d98., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=2 from cache 2024-12-03T18:58:09,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:09,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:10,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:10,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:11,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:11,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:12,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T18:58:12,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T18:58:13,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T18:58:14,281 INFO [master/db5a5ccf5be8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-03T18:58:14,281 INFO [master/db5a5ccf5be8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
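Note on the repeated WARN above: the frames at Method.invoke and RecoverLeaseFSUtils.isFileClosed indicate a reflective call into DistributedFileSystem.isFileClosed(Path), which fails with "java.io.IOException: Filesystem closed" once the test's DFS client has already been shut down and then surfaces as an InvocationTargetException. The following is only a minimal, hypothetical Java sketch of that reflective probe pattern, not HBase's actual RecoverLeaseFSUtils code; the class and method names (IsFileClosedProbe, isFileClosedQuietly) are illustrative, while FileSystem, Path, and DistributedFileSystem.isFileClosed(Path) are real Hadoop APIs.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  // Returns true if the filesystem reports the file as closed, false if the
  // probe cannot be completed. An already-closed DFSClient shows up here as an
  // InvocationTargetException whose cause is "java.io.IOException: Filesystem
  // closed", matching the WARN entries in the log above.
  static boolean isFileClosedQuietly(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) is declared on DistributedFileSystem, not on the
      // generic FileSystem contract, hence the reflective lookup.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // filesystem implementation does not expose the probe
    } catch (IllegalAccessException | InvocationTargetException e) {
      return false; // probe failed; caller may retry or give up
    }
  }
}

A retry loop around such a probe explains the once-per-second cadence of the WARN entries: each attempt fails the same way until the retry budget is exhausted or the enclosing test tears down.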
2024-12-03T18:58:19,125 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340
11 more 2024-12-03T18:58:28,442 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=4, created chunk count=9, reused chunk count=56, reuseRatio=86.15% 2024-12-03T18:58:28,443 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-03T18:58:28,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:28,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:28,854 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0076', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., hostname=db5a5ccf5be8,38091,1733252253164, seqNum=100] 2024-12-03T18:58:28,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:28,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:58:28,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/607c7c5868f842d39ae04a24bf82bf21 is 1080, key is row0076/info:/1733252308855/Put/seqid=0 2024-12-03T18:58:28,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741854_1030 (size=12509) 2024-12-03T18:58:28,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741854_1030 (size=12509) 2024-12-03T18:58:29,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/607c7c5868f842d39ae04a24bf82bf21 2024-12-03T18:58:29,291 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/607c7c5868f842d39ae04a24bf82bf21 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/607c7c5868f842d39ae04a24bf82bf21 2024-12-03T18:58:29,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/607c7c5868f842d39ae04a24bf82bf21, entries=7, sequenceid=110, filesize=12.2 K 2024-12-03T18:58:29,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 84dd73c54d97c54fee00ffcbd27bacdc in 432ms, sequenceid=110, compaction requested=false 2024-12-03T18:58:29,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:29,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:29,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:30,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:30,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:30,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:30,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-03T18:58:30,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/b3e560bfd7ae4e449b8e72a7f3b62c5f is 1080, key is row0083/info:/1733252308869/Put/seqid=0 2024-12-03T18:58:30,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741855_1031 (size=21141) 2024-12-03T18:58:30,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741855_1031 (size=21141) 2024-12-03T18:58:30,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/b3e560bfd7ae4e449b8e72a7f3b62c5f 2024-12-03T18:58:30,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/b3e560bfd7ae4e449b8e72a7f3b62c5f as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/b3e560bfd7ae4e449b8e72a7f3b62c5f 2024-12-03T18:58:30,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/b3e560bfd7ae4e449b8e72a7f3b62c5f, entries=15, sequenceid=128, filesize=20.6 K 2024-12-03T18:58:30,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 84dd73c54d97c54fee00ffcbd27bacdc in 26ms, sequenceid=128, compaction requested=true 2024-12-03T18:58:30,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:30,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 84dd73c54d97c54fee00ffcbd27bacdc:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:58:30,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:30,929 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:58:30,930 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53880 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-12-03T18:58:30,930 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 84dd73c54d97c54fee00ffcbd27bacdc/info is initiating minor compaction (all files) 2024-12-03T18:58:30,930 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 84dd73c54d97c54fee00ffcbd27bacdc/info in TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:30,930 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/c408df797d3243eaa1b720cae687e99e, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/607c7c5868f842d39ae04a24bf82bf21, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/b3e560bfd7ae4e449b8e72a7f3b62c5f] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp, totalSize=52.6 K 2024-12-03T18:58:30,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:30,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-03T18:58:30,931 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting c408df797d3243eaa1b720cae687e99e, keycount=14, bloomtype=ROW, size=19.8 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733252276598 2024-12-03T18:58:30,931 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 607c7c5868f842d39ae04a24bf82bf21, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1733252308855 2024-12-03T18:58:30,931 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting b3e560bfd7ae4e449b8e72a7f3b62c5f, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733252308869 2024-12-03T18:58:30,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/83ebfadc8db44d51a625013620b4fd28 is 1080, key is row0098/info:/1733252310903/Put/seqid=0 2024-12-03T18:58:30,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741856_1032 (size=19000) 2024-12-03T18:58:30,951 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 84dd73c54d97c54fee00ffcbd27bacdc#info#compaction#72 average throughput is 18.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:58:30,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741856_1032 (size=19000) 2024-12-03T18:58:30,951 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/23e8f628685b441ba67eab2bd8a3b9a2 is 1080, key is row0062/info:/1733252276598/Put/seqid=0 2024-12-03T18:58:30,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=144 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/83ebfadc8db44d51a625013620b4fd28 2024-12-03T18:58:30,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741857_1033 (size=44066) 2024-12-03T18:58:30,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741857_1033 (size=44066) 2024-12-03T18:58:30,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/83ebfadc8db44d51a625013620b4fd28 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/83ebfadc8db44d51a625013620b4fd28 2024-12-03T18:58:30,963 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/23e8f628685b441ba67eab2bd8a3b9a2 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/23e8f628685b441ba67eab2bd8a3b9a2 2024-12-03T18:58:30,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/83ebfadc8db44d51a625013620b4fd28, entries=13, sequenceid=144, filesize=18.6 K 2024-12-03T18:58:30,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 84dd73c54d97c54fee00ffcbd27bacdc in 36ms, sequenceid=144, compaction requested=false 2024-12-03T18:58:30,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:30,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:30,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-03T18:58:30,971 INFO 
[RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 84dd73c54d97c54fee00ffcbd27bacdc/info of 84dd73c54d97c54fee00ffcbd27bacdc into 23e8f628685b441ba67eab2bd8a3b9a2(size=43.0 K), total size for store is 61.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:58:30,971 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:30,971 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., storeName=84dd73c54d97c54fee00ffcbd27bacdc/info, priority=13, startTime=1733252310929; duration=0sec 2024-12-03T18:58:30,971 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:30,971 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 84dd73c54d97c54fee00ffcbd27bacdc:info 2024-12-03T18:58:30,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/7b669b8ff7e9438e9c7507be3502847f is 1080, key is row0111/info:/1733252310932/Put/seqid=0 2024-12-03T18:58:30,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741858_1034 (size=23316) 2024-12-03T18:58:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741858_1034 (size=23316) 2024-12-03T18:58:30,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/7b669b8ff7e9438e9c7507be3502847f 2024-12-03T18:58:30,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/7b669b8ff7e9438e9c7507be3502847f as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/7b669b8ff7e9438e9c7507be3502847f 2024-12-03T18:58:30,987 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/7b669b8ff7e9438e9c7507be3502847f, entries=17, sequenceid=164, filesize=22.8 K 2024-12-03T18:58:30,988 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=1.05 KB/1076 for 84dd73c54d97c54fee00ffcbd27bacdc in 19ms, sequenceid=164, compaction requested=true 2024-12-03T18:58:30,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status 
journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:30,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 84dd73c54d97c54fee00ffcbd27bacdc:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:58:30,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:30,988 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:58:30,989 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 86382 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:58:30,989 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 84dd73c54d97c54fee00ffcbd27bacdc/info is initiating minor compaction (all files) 2024-12-03T18:58:30,989 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 84dd73c54d97c54fee00ffcbd27bacdc/info in TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:30,989 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/23e8f628685b441ba67eab2bd8a3b9a2, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/83ebfadc8db44d51a625013620b4fd28, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/7b669b8ff7e9438e9c7507be3502847f] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp, totalSize=84.4 K 2024-12-03T18:58:30,989 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 23e8f628685b441ba67eab2bd8a3b9a2, keycount=36, bloomtype=ROW, size=43.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733252276598 2024-12-03T18:58:30,990 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 83ebfadc8db44d51a625013620b4fd28, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=144, earliestPutTs=1733252310903 2024-12-03T18:58:30,990 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b669b8ff7e9438e9c7507be3502847f, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733252310932 2024-12-03T18:58:31,000 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 84dd73c54d97c54fee00ffcbd27bacdc#info#compaction#74 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:58:31,001 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/8e182317387749f0a057f5acbbb6e8e5 is 1080, key is row0062/info:/1733252276598/Put/seqid=0 2024-12-03T18:58:31,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741859_1035 (size=76649) 2024-12-03T18:58:31,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741859_1035 (size=76649) 2024-12-03T18:58:31,010 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/8e182317387749f0a057f5acbbb6e8e5 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8e182317387749f0a057f5acbbb6e8e5 2024-12-03T18:58:31,016 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 84dd73c54d97c54fee00ffcbd27bacdc/info of 84dd73c54d97c54fee00ffcbd27bacdc into 8e182317387749f0a057f5acbbb6e8e5(size=74.9 K), total size for store is 74.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:58:31,016 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:31,016 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., storeName=84dd73c54d97c54fee00ffcbd27bacdc/info, priority=13, startTime=1733252310988; duration=0sec 2024-12-03T18:58:31,016 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:31,016 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 84dd73c54d97c54fee00ffcbd27bacdc:info 2024-12-03T18:58:31,484 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:31,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:32,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:32,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:32,974 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
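The WARN entries above all have the same shape: RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed reflectively, and because the mini-cluster's DFSClient has already been shut down the call surfaces as an InvocationTargetException whose cause is the real "Filesystem closed" IOException, after which the Close-WAL-Writer worker retries roughly once per second per WAL file. Below is a minimal sketch of that reflective-call-and-unwrap pattern; it is illustrative only, not HBase's actual RecoverLeaseFSUtils code, and every name outside the java.lang.reflect API is an assumption for the example.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

final class ReflectiveIsFileClosedSketch {
    // Call fs.isFileClosed(path) reflectively, since the method may or may
    // not exist on the FileSystem implementation in use.
    static boolean isFileClosed(Object fs, Object path) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", path.getClass());
            return (Boolean) m.invoke(fs, path);
        } catch (InvocationTargetException e) {
            // The target method itself threw: the real error (the
            // "Filesystem closed" IOException in the log) is the cause,
            // which is why each WARN shows InvocationTargetException
            // followed by "Caused by: java.io.IOException".
            System.err.println("Failed invocation: " + e.getCause());
            return false; // the caller keeps polling, as the repeating timestamps show
        } catch (ReflectiveOperationException e) {
            return false; // isFileClosed not available on this implementation
        }
    }
}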
2024-12-03T18:58:32,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:32,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:58:32,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/a6480b9a5cb94d6799e8143f21b14bdb is 1080, key is row0128/info:/1733252310969/Put/seqid=0 2024-12-03T18:58:32,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741860_1036 (size=12516) 2024-12-03T18:58:32,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741860_1036 (size=12516) 2024-12-03T18:58:32,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/a6480b9a5cb94d6799e8143f21b14bdb 2024-12-03T18:58:33,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/a6480b9a5cb94d6799e8143f21b14bdb as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a6480b9a5cb94d6799e8143f21b14bdb 2024-12-03T18:58:33,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a6480b9a5cb94d6799e8143f21b14bdb, entries=7, sequenceid=176, filesize=12.2 K 2024-12-03T18:58:33,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for 84dd73c54d97c54fee00ffcbd27bacdc in 23ms, sequenceid=176, compaction requested=false 2024-12-03T18:58:33,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:33,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:33,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T18:58:33,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/fcf7c2b9e37a4d2a9e10e44152f6e741 is 1080, key is row0135/info:/1733252312987/Put/seqid=0 2024-12-03T18:58:33,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to 
blk_1073741861_1037 (size=16828) 2024-12-03T18:58:33,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741861_1037 (size=16828) 2024-12-03T18:58:33,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/fcf7c2b9e37a4d2a9e10e44152f6e741 2024-12-03T18:58:33,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/fcf7c2b9e37a4d2a9e10e44152f6e741 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fcf7c2b9e37a4d2a9e10e44152f6e741 2024-12-03T18:58:33,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fcf7c2b9e37a4d2a9e10e44152f6e741, entries=11, sequenceid=190, filesize=16.4 K 2024-12-03T18:58:33,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 84dd73c54d97c54fee00ffcbd27bacdc in 32ms, sequenceid=190, compaction requested=true 2024-12-03T18:58:33,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:33,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 84dd73c54d97c54fee00ffcbd27bacdc:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:58:33,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:33,042 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:58:33,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:33,043 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 105993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:58:33,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-03T18:58:33,043 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 84dd73c54d97c54fee00ffcbd27bacdc/info is initiating minor compaction (all files) 2024-12-03T18:58:33,043 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 84dd73c54d97c54fee00ffcbd27bacdc/info in TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 
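The flush and block-report lines above pair a rounded size with the raw byte count, e.g. "~7.36 KB/7532" for the memstore data, and blk_1073741860 (size=12516) reported as filesize=12.2 K for the resulting HFile. A small worked check of that conversion (illustrative only, using 1 KB = 1024 bytes):

final class SizeFigures {
    static String human(long bytes) {
        return String.format("~%.2f KB/%d", bytes / 1024.0, bytes);
    }

    public static void main(String[] args) {
        System.out.println(human(7532));  // ~7.36 KB/7532   -> flush dataSize
        System.out.println(human(8304));  // ~8.11 KB/8304   -> flush heapSize
        System.out.println(human(12516)); // ~12.22 KB/12516 -> HFile a6480b9a..., logged as 12.2 K
    }
}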
2024-12-03T18:58:33,043 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8e182317387749f0a057f5acbbb6e8e5, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a6480b9a5cb94d6799e8143f21b14bdb, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fcf7c2b9e37a4d2a9e10e44152f6e741] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp, totalSize=103.5 K 2024-12-03T18:58:33,044 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e182317387749f0a057f5acbbb6e8e5, keycount=66, bloomtype=ROW, size=74.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1733252276598 2024-12-03T18:58:33,044 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6480b9a5cb94d6799e8143f21b14bdb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733252310969 2024-12-03T18:58:33,045 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting fcf7c2b9e37a4d2a9e10e44152f6e741, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733252312987 2024-12-03T18:58:33,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/a7ff160018f5477d83ef9db6e40bd824 is 1080, key is row0146/info:/1733252313012/Put/seqid=0 2024-12-03T18:58:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741862_1038 (size=20078) 2024-12-03T18:58:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741862_1038 (size=20078) 2024-12-03T18:58:33,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/a7ff160018f5477d83ef9db6e40bd824 2024-12-03T18:58:33,059 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 84dd73c54d97c54fee00ffcbd27bacdc#info#compaction#78 average throughput is 43.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:58:33,060 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/cc96b6ea9ce243acad2c9cfd713a9ca3 is 1080, key is row0062/info:/1733252276598/Put/seqid=0 2024-12-03T18:58:33,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741863_1039 (size=96159) 2024-12-03T18:58:33,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741863_1039 (size=96159) 2024-12-03T18:58:33,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/a7ff160018f5477d83ef9db6e40bd824 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a7ff160018f5477d83ef9db6e40bd824 2024-12-03T18:58:33,071 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/cc96b6ea9ce243acad2c9cfd713a9ca3 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/cc96b6ea9ce243acad2c9cfd713a9ca3 2024-12-03T18:58:33,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a7ff160018f5477d83ef9db6e40bd824, entries=14, sequenceid=207, filesize=19.6 K 2024-12-03T18:58:33,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 84dd73c54d97c54fee00ffcbd27bacdc in 30ms, sequenceid=207, compaction requested=false 2024-12-03T18:58:33,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:33,078 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 84dd73c54d97c54fee00ffcbd27bacdc/info of 84dd73c54d97c54fee00ffcbd27bacdc into cc96b6ea9ce243acad2c9cfd713a9ca3(size=93.9 K), total size for store is 113.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T18:58:33,078 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:33,078 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., storeName=84dd73c54d97c54fee00ffcbd27bacdc/info, priority=13, startTime=1733252313042; duration=0sec 2024-12-03T18:58:33,078 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:33,078 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 84dd73c54d97c54fee00ffcbd27bacdc:info 2024-12-03T18:58:33,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:33,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:34,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:34,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:35,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:35,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:58:35,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/461400d33b2243b19adf9f7fdab1bdf9 is 1080, key is row0160/info:/1733252313046/Put/seqid=0 2024-12-03T18:58:35,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741864_1040 (size=12516) 2024-12-03T18:58:35,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741864_1040 (size=12516) 2024-12-03T18:58:35,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/461400d33b2243b19adf9f7fdab1bdf9 2024-12-03T18:58:35,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/461400d33b2243b19adf9f7fdab1bdf9 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/461400d33b2243b19adf9f7fdab1bdf9 2024-12-03T18:58:35,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/461400d33b2243b19adf9f7fdab1bdf9, entries=7, sequenceid=218, filesize=12.2 K 2024-12-03T18:58:35,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 84dd73c54d97c54fee00ffcbd27bacdc in 25ms, sequenceid=218, compaction requested=true 2024-12-03T18:58:35,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:35,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 84dd73c54d97c54fee00ffcbd27bacdc:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:58:35,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:35,085 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:58:35,086 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128753 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-12-03T18:58:35,086 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 84dd73c54d97c54fee00ffcbd27bacdc/info is initiating minor compaction (all files) 2024-12-03T18:58:35,086 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 84dd73c54d97c54fee00ffcbd27bacdc/info in TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:35,086 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/cc96b6ea9ce243acad2c9cfd713a9ca3, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a7ff160018f5477d83ef9db6e40bd824, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/461400d33b2243b19adf9f7fdab1bdf9] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp, totalSize=125.7 K 2024-12-03T18:58:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:35,087 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc96b6ea9ce243acad2c9cfd713a9ca3, keycount=84, bloomtype=ROW, size=93.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733252276598 2024-12-03T18:58:35,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-03T18:58:35,087 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7ff160018f5477d83ef9db6e40bd824, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733252313012 2024-12-03T18:58:35,088 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 461400d33b2243b19adf9f7fdab1bdf9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733252313046 2024-12-03T18:58:35,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/522a047ed296451795e80936c7528a98 is 1080, key is row0167/info:/1733252315062/Put/seqid=0 2024-12-03T18:58:35,103 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 84dd73c54d97c54fee00ffcbd27bacdc#info#compaction#81 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:58:35,104 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/a0833655fc794bad8fed0bce19a17090 is 1080, key is row0062/info:/1733252276598/Put/seqid=0 2024-12-03T18:58:35,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741865_1041 (size=17906) 2024-12-03T18:58:35,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741865_1041 (size=17906) 2024-12-03T18:58:35,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/522a047ed296451795e80936c7528a98 2024-12-03T18:58:35,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/522a047ed296451795e80936c7528a98 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/522a047ed296451795e80936c7528a98 2024-12-03T18:58:35,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741866_1042 (size=118903) 2024-12-03T18:58:35,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741866_1042 (size=118903) 2024-12-03T18:58:35,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/522a047ed296451795e80936c7528a98, entries=12, sequenceid=233, filesize=17.5 K 2024-12-03T18:58:35,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for 84dd73c54d97c54fee00ffcbd27bacdc in 33ms, sequenceid=233, compaction requested=false 2024-12-03T18:58:35,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:35,124 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/a0833655fc794bad8fed0bce19a17090 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a0833655fc794bad8fed0bce19a17090 2024-12-03T18:58:35,131 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 84dd73c54d97c54fee00ffcbd27bacdc/info of 84dd73c54d97c54fee00ffcbd27bacdc into 
a0833655fc794bad8fed0bce19a17090(size=116.1 K), total size for store is 133.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:58:35,131 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:35,131 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., storeName=84dd73c54d97c54fee00ffcbd27bacdc/info, priority=13, startTime=1733252315085; duration=0sec 2024-12-03T18:58:35,131 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:35,131 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 84dd73c54d97c54fee00ffcbd27bacdc:info 2024-12-03T18:58:35,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,217 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,217 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:35,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:35,726 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:58:35,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:35,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:36,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:36,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:37,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:37,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-03T18:58:37,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/d3371dfcf68d48f19d03aa5342cc88a6 is 1080, key is row0179/info:/1733252315088/Put/seqid=0 2024-12-03T18:58:37,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741867_1043 (size=21156) 2024-12-03T18:58:37,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741867_1043 (size=21156) 2024-12-03T18:58:37,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/d3371dfcf68d48f19d03aa5342cc88a6 2024-12-03T18:58:37,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/d3371dfcf68d48f19d03aa5342cc88a6 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d3371dfcf68d48f19d03aa5342cc88a6 2024-12-03T18:58:37,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d3371dfcf68d48f19d03aa5342cc88a6, entries=15, sequenceid=252, filesize=20.7 K 2024-12-03T18:58:37,142 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=9.46 KB/9684 for 84dd73c54d97c54fee00ffcbd27bacdc in 24ms, sequenceid=252, compaction requested=true 2024-12-03T18:58:37,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:37,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 84dd73c54d97c54fee00ffcbd27bacdc:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:58:37,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:37,142 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:58:37,143 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 157965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:58:37,143 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 84dd73c54d97c54fee00ffcbd27bacdc/info is initiating minor compaction (all files) 2024-12-03T18:58:37,143 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 84dd73c54d97c54fee00ffcbd27bacdc/info in TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:37,143 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a0833655fc794bad8fed0bce19a17090, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/522a047ed296451795e80936c7528a98, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d3371dfcf68d48f19d03aa5342cc88a6] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp, totalSize=154.3 K 2024-12-03T18:58:37,144 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting a0833655fc794bad8fed0bce19a17090, keycount=105, bloomtype=ROW, size=116.1 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733252276598 2024-12-03T18:58:37,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:37,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T18:58:37,144 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 522a047ed296451795e80936c7528a98, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733252315062 2024-12-03T18:58:37,144 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3371dfcf68d48f19d03aa5342cc88a6, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733252315088 2024-12-03T18:58:37,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/f17c7eb84e6e484b8067d5a2a6aa19fd is 1080, key is row0194/info:/1733252317119/Put/seqid=0 2024-12-03T18:58:37,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is 
added to blk_1073741868_1044 (size=16837) 2024-12-03T18:58:37,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741868_1044 (size=16837) 2024-12-03T18:58:37,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/f17c7eb84e6e484b8067d5a2a6aa19fd 2024-12-03T18:58:37,167 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 84dd73c54d97c54fee00ffcbd27bacdc#info#compaction#84 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:58:37,168 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/d330e73e544d4e22be82b190a0508c5a is 1080, key is row0062/info:/1733252276598/Put/seqid=0 2024-12-03T18:58:37,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/f17c7eb84e6e484b8067d5a2a6aa19fd as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/f17c7eb84e6e484b8067d5a2a6aa19fd 2024-12-03T18:58:37,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741869_1045 (size=148316) 2024-12-03T18:58:37,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741869_1045 (size=148316) 2024-12-03T18:58:37,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/f17c7eb84e6e484b8067d5a2a6aa19fd, entries=11, sequenceid=266, filesize=16.4 K 2024-12-03T18:58:37,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=16.81 KB/17216 for 84dd73c54d97c54fee00ffcbd27bacdc in 35ms, sequenceid=266, compaction requested=false 2024-12-03T18:58:37,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:37,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:37,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-03T18:58:37,183 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/d330e73e544d4e22be82b190a0508c5a as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d330e73e544d4e22be82b190a0508c5a 2024-12-03T18:58:37,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/fe9d5dffc87f488391797eb7f147c705 is 1080, key is row0205/info:/1733252317145/Put/seqid=0 2024-12-03T18:58:37,189 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 84dd73c54d97c54fee00ffcbd27bacdc/info of 84dd73c54d97c54fee00ffcbd27bacdc into d330e73e544d4e22be82b190a0508c5a(size=144.8 K), total size for store is 161.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:58:37,189 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:37,189 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., storeName=84dd73c54d97c54fee00ffcbd27bacdc/info, priority=13, startTime=1733252317142; duration=0sec 2024-12-03T18:58:37,189 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:37,189 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 84dd73c54d97c54fee00ffcbd27bacdc:info 2024-12-03T18:58:37,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741870_1046 (size=23333) 2024-12-03T18:58:37,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741870_1046 (size=23333) 2024-12-03T18:58:37,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/fe9d5dffc87f488391797eb7f147c705 2024-12-03T18:58:37,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/fe9d5dffc87f488391797eb7f147c705 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fe9d5dffc87f488391797eb7f147c705 2024-12-03T18:58:37,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fe9d5dffc87f488391797eb7f147c705, entries=17, sequenceid=286, filesize=22.8 K 2024-12-03T18:58:37,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=3.15 KB/3228 for 84dd73c54d97c54fee00ffcbd27bacdc in 21ms, sequenceid=286, compaction requested=true 2024-12-03T18:58:37,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:37,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 84dd73c54d97c54fee00ffcbd27bacdc:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:58:37,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:37,201 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:58:37,202 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188486 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:58:37,202 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 84dd73c54d97c54fee00ffcbd27bacdc/info is initiating minor compaction (all files) 2024-12-03T18:58:37,202 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 84dd73c54d97c54fee00ffcbd27bacdc/info in TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 
2024-12-03T18:58:37,202 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d330e73e544d4e22be82b190a0508c5a, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/f17c7eb84e6e484b8067d5a2a6aa19fd, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fe9d5dffc87f488391797eb7f147c705] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp, totalSize=184.1 K 2024-12-03T18:58:37,203 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting d330e73e544d4e22be82b190a0508c5a, keycount=132, bloomtype=ROW, size=144.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733252276598 2024-12-03T18:58:37,203 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting f17c7eb84e6e484b8067d5a2a6aa19fd, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733252317119 2024-12-03T18:58:37,203 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting fe9d5dffc87f488391797eb7f147c705, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733252317145 2024-12-03T18:58:37,214 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 84dd73c54d97c54fee00ffcbd27bacdc#info#compaction#86 average throughput is 54.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:58:37,215 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/29aa68b27cb0441f8aa0b4eccfd5d960 is 1080, key is row0062/info:/1733252276598/Put/seqid=0 2024-12-03T18:58:37,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741871_1047 (size=178636) 2024-12-03T18:58:37,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741871_1047 (size=178636) 2024-12-03T18:58:37,223 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/29aa68b27cb0441f8aa0b4eccfd5d960 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/29aa68b27cb0441f8aa0b4eccfd5d960 2024-12-03T18:58:37,228 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 84dd73c54d97c54fee00ffcbd27bacdc/info of 84dd73c54d97c54fee00ffcbd27bacdc into 29aa68b27cb0441f8aa0b4eccfd5d960(size=174.4 K), total size for store is 174.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T18:58:37,228 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:37,229 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., storeName=84dd73c54d97c54fee00ffcbd27bacdc/info, priority=13, startTime=1733252317201; duration=0sec 2024-12-03T18:58:37,229 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:37,229 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 84dd73c54d97c54fee00ffcbd27bacdc:info 2024-12-03T18:58:37,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:37,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:38,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:38,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:39,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:39,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T18:58:39,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/25ae82cf996e4eb9a7ffb974d896042c is 1080, key is row0222/info:/1733252317181/Put/seqid=0 2024-12-03T18:58:39,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741872_1048 (size=12523) 2024-12-03T18:58:39,207 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/25ae82cf996e4eb9a7ffb974d896042c 2024-12-03T18:58:39,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741872_1048 (size=12523) 2024-12-03T18:58:39,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/25ae82cf996e4eb9a7ffb974d896042c as 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/25ae82cf996e4eb9a7ffb974d896042c 2024-12-03T18:58:39,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/25ae82cf996e4eb9a7ffb974d896042c, entries=7, sequenceid=298, filesize=12.2 K 2024-12-03T18:58:39,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 84dd73c54d97c54fee00ffcbd27bacdc in 25ms, sequenceid=298, compaction requested=false 2024-12-03T18:58:39,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:39,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:39,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-03T18:58:39,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/aa179a64123c4916821d73e1af6fde62 is 1080, key is row0229/info:/1733252319197/Put/seqid=0 2024-12-03T18:58:39,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741873_1049 (size=17918) 2024-12-03T18:58:39,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741873_1049 (size=17918) 2024-12-03T18:58:39,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/aa179a64123c4916821d73e1af6fde62 2024-12-03T18:58:39,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/aa179a64123c4916821d73e1af6fde62 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/aa179a64123c4916821d73e1af6fde62 2024-12-03T18:58:39,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/aa179a64123c4916821d73e1af6fde62, entries=12, sequenceid=313, filesize=17.5 K 2024-12-03T18:58:39,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 84dd73c54d97c54fee00ffcbd27bacdc in 21ms, sequenceid=313, compaction requested=true 2024-12-03T18:58:39,244 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:39,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38091 {}] regionserver.HRegion(8855): Flush requested on 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:39,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 84dd73c54d97c54fee00ffcbd27bacdc:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T18:58:39,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:39,244 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T18:58:39,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T18:58:39,245 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 209077 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T18:58:39,245 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1541): 84dd73c54d97c54fee00ffcbd27bacdc/info is initiating minor compaction (all files) 2024-12-03T18:58:39,245 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 84dd73c54d97c54fee00ffcbd27bacdc/info in TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:39,245 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/29aa68b27cb0441f8aa0b4eccfd5d960, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/25ae82cf996e4eb9a7ffb974d896042c, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/aa179a64123c4916821d73e1af6fde62] into tmpdir=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp, totalSize=204.2 K 2024-12-03T18:58:39,246 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 29aa68b27cb0441f8aa0b4eccfd5d960, keycount=160, bloomtype=ROW, size=174.4 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733252276598 2024-12-03T18:58:39,246 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting 25ae82cf996e4eb9a7ffb974d896042c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733252317181 2024-12-03T18:58:39,247 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] compactions.Compactor(225): Compacting aa179a64123c4916821d73e1af6fde62, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733252319197 2024-12-03T18:58:39,248 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/1bedc75cfedc4d3582499eeee283ab02 is 1080, key is row0241/info:/1733252319223/Put/seqid=0 2024-12-03T18:58:39,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741874_1050 (size=16839) 2024-12-03T18:58:39,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741874_1050 (size=16839) 2024-12-03T18:58:39,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/1bedc75cfedc4d3582499eeee283ab02 2024-12-03T18:58:39,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/1bedc75cfedc4d3582499eeee283ab02 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/1bedc75cfedc4d3582499eeee283ab02 2024-12-03T18:58:39,262 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 84dd73c54d97c54fee00ffcbd27bacdc#info#compaction#90 average throughput is 61.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T18:58:39,263 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/b0d653d49a544439bbbdd98b8925bc4f is 1080, key is row0062/info:/1733252276598/Put/seqid=0 2024-12-03T18:58:39,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741875_1051 (size=199243) 2024-12-03T18:58:39,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741875_1051 (size=199243) 2024-12-03T18:58:39,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/1bedc75cfedc4d3582499eeee283ab02, entries=11, sequenceid=327, filesize=16.4 K 2024-12-03T18:58:39,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=5.25 KB/5380 for 84dd73c54d97c54fee00ffcbd27bacdc in 23ms, sequenceid=327, compaction requested=false 2024-12-03T18:58:39,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:39,270 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/b0d653d49a544439bbbdd98b8925bc4f as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/b0d653d49a544439bbbdd98b8925bc4f 2024-12-03T18:58:39,277 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 84dd73c54d97c54fee00ffcbd27bacdc/info of 84dd73c54d97c54fee00ffcbd27bacdc into b0d653d49a544439bbbdd98b8925bc4f(size=194.6 K), total size for store is 211.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T18:58:39,277 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:39,277 INFO [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., storeName=84dd73c54d97c54fee00ffcbd27bacdc/info, priority=13, startTime=1733252319244; duration=0sec 2024-12-03T18:58:39,277 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T18:58:39,277 DEBUG [RS:0;db5a5ccf5be8:38091-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 84dd73c54d97c54fee00ffcbd27bacdc:info 2024-12-03T18:58:39,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:39,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:40,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:40,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:41,256 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-03T18:58:41,257 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C38091%2C1733252253164.1733252321257 2024-12-03T18:58:41,287 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,287 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,287 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,287 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,287 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.1733252253788 with entries=313, filesize=308.61 KB; new WAL /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.1733252321257 2024-12-03T18:58:41,288 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43093:43093),(127.0.0.1/127.0.0.1:40925:40925)] 2024-12-03T18:58:41,288 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.1733252253788 is not closed yet, will try archiving it next time 2024-12-03T18:58:41,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741833_1009 (size=316020) 2024-12-03T18:58:41,292 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-12-03T18:58:41,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741833_1009 (size=316020) 2024-12-03T18:58:41,302 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/info/5e3b2b66bd7b494db24a298c30f7a96b is 193, key is TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc./info:regioninfo/1733252279375/Put/seqid=0 2024-12-03T18:58:41,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741877_1053 (size=6223) 2024-12-03T18:58:41,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741877_1053 (size=6223) 2024-12-03T18:58:41,312 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/info/5e3b2b66bd7b494db24a298c30f7a96b 2024-12-03T18:58:41,320 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/.tmp/info/5e3b2b66bd7b494db24a298c30f7a96b as 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/info/5e3b2b66bd7b494db24a298c30f7a96b 2024-12-03T18:58:41,327 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/info/5e3b2b66bd7b494db24a298c30f7a96b, entries=5, sequenceid=21, filesize=6.1 K 2024-12-03T18:58:41,328 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=21, compaction requested=false 2024-12-03T18:58:41,329 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T18:58:41,329 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 84dd73c54d97c54fee00ffcbd27bacdc 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-12-03T18:58:41,333 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/e684a28f447c4391a132cac9f1bb05d6 is 1080, key is row0252/info:/1733252319245/Put/seqid=0 2024-12-03T18:58:41,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741878_1054 (size=10357) 2024-12-03T18:58:41,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741878_1054 (size=10357) 2024-12-03T18:58:41,341 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/e684a28f447c4391a132cac9f1bb05d6 2024-12-03T18:58:41,350 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/.tmp/info/e684a28f447c4391a132cac9f1bb05d6 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/e684a28f447c4391a132cac9f1bb05d6 2024-12-03T18:58:41,357 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/e684a28f447c4391a132cac9f1bb05d6, entries=5, sequenceid=336, filesize=10.1 K 2024-12-03T18:58:41,358 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 84dd73c54d97c54fee00ffcbd27bacdc in 28ms, sequenceid=336, compaction requested=true 2024-12-03T18:58:41,358 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 84dd73c54d97c54fee00ffcbd27bacdc: 2024-12-03T18:58:41,358 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 41df4623a1d17f001bcd00dd80ddad45: 2024-12-03T18:58:41,358 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
db5a5ccf5be8%2C38091%2C1733252253164.1733252321358 2024-12-03T18:58:41,370 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,370 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,370 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,370 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,371 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,371 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.1733252321257 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.1733252321358 2024-12-03T18:58:41,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741876_1052 (size=731) 2024-12-03T18:58:41,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741876_1052 (size=731) 2024-12-03T18:58:41,379 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.1733252253788 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/oldWALs/db5a5ccf5be8%2C38091%2C1733252253164.1733252253788 2024-12-03T18:58:41,381 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/WALs/db5a5ccf5be8,38091,1733252253164/db5a5ccf5be8%2C38091%2C1733252253164.1733252321257 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/oldWALs/db5a5ccf5be8%2C38091%2C1733252253164.1733252321257 2024-12-03T18:58:41,381 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43093:43093),(127.0.0.1/127.0.0.1:40925:40925)] 2024-12-03T18:58:41,382 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T18:58:41,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T18:58:41,382 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T18:58:41,382 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:58:41,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:41,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:41,383 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-03T18:58:41,383 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T18:58:41,383 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=39837571, stopped=false 2024-12-03T18:58:41,383 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db5a5ccf5be8,34991,1733252252995 2024-12-03T18:58:41,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:58:41,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:58:41,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:41,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:41,438 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:58:41,438 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T18:58:41,438 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:58:41,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:41,438 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:58:41,439 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db5a5ccf5be8,38091,1733252253164' ***** 2024-12-03T18:58:41,439 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T18:58:41,439 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T18:58:41,439 INFO [RS:0;db5a5ccf5be8:38091 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T18:58:41,439 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T18:58:41,439 INFO [RS:0;db5a5ccf5be8:38091 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T18:58:41,439 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(3091): Received CLOSE for 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:41,439 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(3091): Received CLOSE for 41df4623a1d17f001bcd00dd80ddad45 2024-12-03T18:58:41,439 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(959): stopping server db5a5ccf5be8,38091,1733252253164 2024-12-03T18:58:41,439 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:58:41,439 INFO [RS:0;db5a5ccf5be8:38091 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db5a5ccf5be8:38091. 
2024-12-03T18:58:41,439 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 84dd73c54d97c54fee00ffcbd27bacdc, disabling compactions & flushes 2024-12-03T18:58:41,439 DEBUG [RS:0;db5a5ccf5be8:38091 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:58:41,439 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:41,440 DEBUG [RS:0;db5a5ccf5be8:38091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:41,440 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:41,440 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. after waiting 0 ms 2024-12-03T18:58:41,440 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:41,440 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T18:58:41,440 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T18:58:41,440 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T18:58:41,440 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T18:58:41,440 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:58:41,440 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-03T18:58:41,441 DEBUG [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 84dd73c54d97c54fee00ffcbd27bacdc=TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc., 41df4623a1d17f001bcd00dd80ddad45=TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45.} 2024-12-03T18:58:41,441 DEBUG [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 41df4623a1d17f001bcd00dd80ddad45, 84dd73c54d97c54fee00ffcbd27bacdc 2024-12-03T18:58:41,441 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:58:41,442 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:58:41,441 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98->hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069-top, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/c408df797d3243eaa1b720cae687e99e, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/TestLogRolling-testLogRolling=549af462e760befc589617aa40c27d98-50b24cd1da3e4ea284893a0af659fbae, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/607c7c5868f842d39ae04a24bf82bf21, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/23e8f628685b441ba67eab2bd8a3b9a2, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/b3e560bfd7ae4e449b8e72a7f3b62c5f, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/83ebfadc8db44d51a625013620b4fd28, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8e182317387749f0a057f5acbbb6e8e5, 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/7b669b8ff7e9438e9c7507be3502847f, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a6480b9a5cb94d6799e8143f21b14bdb, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/cc96b6ea9ce243acad2c9cfd713a9ca3, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fcf7c2b9e37a4d2a9e10e44152f6e741, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a7ff160018f5477d83ef9db6e40bd824, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a0833655fc794bad8fed0bce19a17090, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/461400d33b2243b19adf9f7fdab1bdf9, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/522a047ed296451795e80936c7528a98, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d330e73e544d4e22be82b190a0508c5a, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d3371dfcf68d48f19d03aa5342cc88a6, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/f17c7eb84e6e484b8067d5a2a6aa19fd, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/29aa68b27cb0441f8aa0b4eccfd5d960, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fe9d5dffc87f488391797eb7f147c705, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/25ae82cf996e4eb9a7ffb974d896042c, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/aa179a64123c4916821d73e1af6fde62] to archive 2024-12-03T18:58:41,442 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:58:41,442 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:58:41,442 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-03T18:58:41,443 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T18:58:41,445 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98 2024-12-03T18:58:41,447 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/c408df797d3243eaa1b720cae687e99e to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/c408df797d3243eaa1b720cae687e99e 2024-12-03T18:58:41,449 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/TestLogRolling-testLogRolling=549af462e760befc589617aa40c27d98-50b24cd1da3e4ea284893a0af659fbae to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/TestLogRolling-testLogRolling=549af462e760befc589617aa40c27d98-50b24cd1da3e4ea284893a0af659fbae 2024-12-03T18:58:41,451 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/607c7c5868f842d39ae04a24bf82bf21 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/607c7c5868f842d39ae04a24bf82bf21 2024-12-03T18:58:41,451 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-03T18:58:41,451 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:58:41,452 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:58:41,452 DEBUG 
[RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252321441Running coprocessor pre-close hooks at 1733252321441Disabling compacts and flushes for region at 1733252321441Disabling writes for close at 1733252321442 (+1 ms)Writing region close event to WAL at 1733252321446 (+4 ms)Running coprocessor post-close hooks at 1733252321451 (+5 ms)Closed at 1733252321451 2024-12-03T18:58:41,452 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T18:58:41,452 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/23e8f628685b441ba67eab2bd8a3b9a2 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/23e8f628685b441ba67eab2bd8a3b9a2 2024-12-03T18:58:41,454 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/b3e560bfd7ae4e449b8e72a7f3b62c5f to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/b3e560bfd7ae4e449b8e72a7f3b62c5f 2024-12-03T18:58:41,455 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/83ebfadc8db44d51a625013620b4fd28 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/83ebfadc8db44d51a625013620b4fd28 2024-12-03T18:58:41,456 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8e182317387749f0a057f5acbbb6e8e5 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/8e182317387749f0a057f5acbbb6e8e5 2024-12-03T18:58:41,457 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/7b669b8ff7e9438e9c7507be3502847f to 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/7b669b8ff7e9438e9c7507be3502847f 2024-12-03T18:58:41,459 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a6480b9a5cb94d6799e8143f21b14bdb to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a6480b9a5cb94d6799e8143f21b14bdb 2024-12-03T18:58:41,460 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/cc96b6ea9ce243acad2c9cfd713a9ca3 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/cc96b6ea9ce243acad2c9cfd713a9ca3 2024-12-03T18:58:41,461 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fcf7c2b9e37a4d2a9e10e44152f6e741 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fcf7c2b9e37a4d2a9e10e44152f6e741 2024-12-03T18:58:41,463 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a7ff160018f5477d83ef9db6e40bd824 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a7ff160018f5477d83ef9db6e40bd824 2024-12-03T18:58:41,464 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a0833655fc794bad8fed0bce19a17090 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/a0833655fc794bad8fed0bce19a17090 2024-12-03T18:58:41,466 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/461400d33b2243b19adf9f7fdab1bdf9 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/461400d33b2243b19adf9f7fdab1bdf9 2024-12-03T18:58:41,467 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/522a047ed296451795e80936c7528a98 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/522a047ed296451795e80936c7528a98 2024-12-03T18:58:41,468 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d330e73e544d4e22be82b190a0508c5a to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d330e73e544d4e22be82b190a0508c5a 2024-12-03T18:58:41,469 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d3371dfcf68d48f19d03aa5342cc88a6 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/d3371dfcf68d48f19d03aa5342cc88a6 2024-12-03T18:58:41,471 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/f17c7eb84e6e484b8067d5a2a6aa19fd to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/f17c7eb84e6e484b8067d5a2a6aa19fd 2024-12-03T18:58:41,472 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/29aa68b27cb0441f8aa0b4eccfd5d960 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/29aa68b27cb0441f8aa0b4eccfd5d960 2024-12-03T18:58:41,474 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fe9d5dffc87f488391797eb7f147c705 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/fe9d5dffc87f488391797eb7f147c705 2024-12-03T18:58:41,475 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/25ae82cf996e4eb9a7ffb974d896042c to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/25ae82cf996e4eb9a7ffb974d896042c 2024-12-03T18:58:41,477 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/aa179a64123c4916821d73e1af6fde62 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/info/aa179a64123c4916821d73e1af6fde62 2024-12-03T18:58:41,477 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db5a5ccf5be8:34991 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-03T18:58:41,478 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c408df797d3243eaa1b720cae687e99e=20230, 607c7c5868f842d39ae04a24bf82bf21=12509, 23e8f628685b441ba67eab2bd8a3b9a2=44066, b3e560bfd7ae4e449b8e72a7f3b62c5f=21141, 83ebfadc8db44d51a625013620b4fd28=19000, 8e182317387749f0a057f5acbbb6e8e5=76649, 7b669b8ff7e9438e9c7507be3502847f=23316, a6480b9a5cb94d6799e8143f21b14bdb=12516, cc96b6ea9ce243acad2c9cfd713a9ca3=96159, fcf7c2b9e37a4d2a9e10e44152f6e741=16828, a7ff160018f5477d83ef9db6e40bd824=20078, a0833655fc794bad8fed0bce19a17090=118903, 461400d33b2243b19adf9f7fdab1bdf9=12516, 522a047ed296451795e80936c7528a98=17906, d330e73e544d4e22be82b190a0508c5a=148316, d3371dfcf68d48f19d03aa5342cc88a6=21156, f17c7eb84e6e484b8067d5a2a6aa19fd=16837, 29aa68b27cb0441f8aa0b4eccfd5d960=178636, fe9d5dffc87f488391797eb7f147c705=23333, 25ae82cf996e4eb9a7ffb974d896042c=12523, aa179a64123c4916821d73e1af6fde62=17918] 2024-12-03T18:58:41,483 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/84dd73c54d97c54fee00ffcbd27bacdc/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=99 2024-12-03T18:58:41,483 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:41,483 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 84dd73c54d97c54fee00ffcbd27bacdc: Waiting for close lock at 1733252321439Running coprocessor pre-close hooks at 1733252321439Disabling compacts and flushes for region at 1733252321439Disabling writes for close at 1733252321440 (+1 ms)Writing region close event to WAL at 1733252321478 (+38 ms)Running coprocessor post-close hooks at 1733252321483 (+5 ms)Closed at 1733252321483 2024-12-03T18:58:41,484 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733252278676.84dd73c54d97c54fee00ffcbd27bacdc. 2024-12-03T18:58:41,484 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 41df4623a1d17f001bcd00dd80ddad45, disabling compactions & flushes 2024-12-03T18:58:41,484 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:58:41,484 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:58:41,484 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 
after waiting 0 ms 2024-12-03T18:58:41,484 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:58:41,484 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98->hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/549af462e760befc589617aa40c27d98/info/8047aa4aba364985a72541625f874069-bottom] to archive 2024-12-03T18:58:41,486 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T18:58:41,487 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98 to hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/archive/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/info/8047aa4aba364985a72541625f874069.549af462e760befc589617aa40c27d98 2024-12-03T18:58:41,487 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-03T18:58:41,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:41,491 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/data/default/TestLogRolling-testLogRolling/41df4623a1d17f001bcd00dd80ddad45/recovered.edits/104.seqid, newMaxSeqId=104, maxSeqId=99 2024-12-03T18:58:41,491 INFO [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:58:41,491 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 41df4623a1d17f001bcd00dd80ddad45: Waiting for close lock at 1733252321484Running coprocessor pre-close hooks at 1733252321484Disabling compacts and flushes for region at 1733252321484Disabling writes for close at 1733252321484Writing region close event to WAL at 1733252321487 (+3 ms)Running coprocessor post-close hooks at 1733252321491 (+4 ms)Closed at 1733252321491 2024-12-03T18:58:41,491 DEBUG [RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733252278676.41df4623a1d17f001bcd00dd80ddad45. 2024-12-03T18:58:41,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:41,641 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(976): stopping server db5a5ccf5be8,38091,1733252253164; all regions closed. 2024-12-03T18:58:41,641 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,641 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,641 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,641 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,642 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741834_1010 (size=8107) 2024-12-03T18:58:41,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741834_1010 (size=8107) 2024-12-03T18:58:41,647 DEBUG [RS:0;db5a5ccf5be8:38091 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/oldWALs 2024-12-03T18:58:41,647 INFO [RS:0;db5a5ccf5be8:38091 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C38091%2C1733252253164.meta:.meta(num 1733252254112) 2024-12-03T18:58:41,647 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,647 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,647 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,647 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,647 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:41,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741879_1055 (size=778) 2024-12-03T18:58:41,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741879_1055 (size=778) 2024-12-03T18:58:41,651 DEBUG [RS:0;db5a5ccf5be8:38091 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/oldWALs 2024-12-03T18:58:41,651 INFO [RS:0;db5a5ccf5be8:38091 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C38091%2C1733252253164:(num 1733252321358) 2024-12-03T18:58:41,651 DEBUG [RS:0;db5a5ccf5be8:38091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:41,651 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:58:41,651 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.HBaseServerBase(438): 
Shutdown chores and chore service 2024-12-03T18:58:41,651 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.ChoreService(370): Chore service for: regionserver/db5a5ccf5be8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T18:58:41,652 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:58:41,652 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T18:58:41,652 INFO [RS:0;db5a5ccf5be8:38091 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38091 2024-12-03T18:58:41,653 INFO [regionserver/db5a5ccf5be8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:58:41,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db5a5ccf5be8,38091,1733252253164 2024-12-03T18:58:41,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:58:41,659 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:58:41,669 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db5a5ccf5be8,38091,1733252253164] 2024-12-03T18:58:41,680 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db5a5ccf5be8,38091,1733252253164 already deleted, retry=false 2024-12-03T18:58:41,680 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db5a5ccf5be8,38091,1733252253164 expired; onlineServers=0 2024-12-03T18:58:41,680 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db5a5ccf5be8,34991,1733252252995' ***** 2024-12-03T18:58:41,680 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T18:58:41,680 INFO [M:0;db5a5ccf5be8:34991 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:58:41,680 INFO [M:0;db5a5ccf5be8:34991 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:58:41,680 DEBUG [M:0;db5a5ccf5be8:34991 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T18:58:41,680 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
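The two WARN stack traces from RecoverLeaseFSUtils above read "java.lang.reflect.InvocationTargetException: null" even though the actual failure is the nested "java.io.IOException: Filesystem closed". The reason is that RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed reflectively (the Method.invoke frames in those traces), and reflection wraps whatever the target method throws in an InvocationTargetException whose own message is null. The standalone snippet below demonstrates that wrapping; it uses no HBase or HDFS classes.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Demonstrates why the log prints "InvocationTargetException: null" with the real
// error only in the Caused-by chain. Plain JDK behaviour, unrelated to HBase.
public class InvocationTargetDemo {
  public static void failLikeDfsClient() throws IOException {
    throw new IOException("Filesystem closed");
  }

  public static void main(String[] args) throws Exception {
    Method m = InvocationTargetDemo.class.getMethod("failLikeDfsClient");
    try {
      m.invoke(null);
    } catch (InvocationTargetException e) {
      System.out.println(e.getMessage());            // prints: null
      System.out.println(e.getCause().getMessage()); // prints: Filesystem closed
    }
  }
}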
2024-12-03T18:58:41,680 DEBUG [M:0;db5a5ccf5be8:34991 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T18:58:41,680 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252253498 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252253498,5,FailOnTimeoutGroup] 2024-12-03T18:58:41,680 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252253499 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252253499,5,FailOnTimeoutGroup] 2024-12-03T18:58:41,681 INFO [M:0;db5a5ccf5be8:34991 {}] hbase.ChoreService(370): Chore service for: master/db5a5ccf5be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T18:58:41,681 INFO [M:0;db5a5ccf5be8:34991 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:58:41,681 DEBUG [M:0;db5a5ccf5be8:34991 {}] master.HMaster(1795): Stopping service threads 2024-12-03T18:58:41,681 INFO [M:0;db5a5ccf5be8:34991 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T18:58:41,681 INFO [M:0;db5a5ccf5be8:34991 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:58:41,681 INFO [M:0;db5a5ccf5be8:34991 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T18:58:41,681 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T18:58:41,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T18:58:41,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:41,690 DEBUG [M:0;db5a5ccf5be8:34991 {}] zookeeper.ZKUtil(347): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T18:58:41,690 WARN [M:0;db5a5ccf5be8:34991 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T18:58:41,691 INFO [M:0;db5a5ccf5be8:34991 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/.lastflushedseqids 2024-12-03T18:58:41,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741880_1056 (size=228) 2024-12-03T18:58:41,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741880_1056 (size=228) 2024-12-03T18:58:41,698 INFO [M:0;db5a5ccf5be8:34991 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T18:58:41,698 INFO [M:0;db5a5ccf5be8:34991 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T18:58:41,699 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:58:41,699 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:41,699 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:41,699 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:58:41,699 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:41,699 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.41 KB heapSize=63.33 KB 2024-12-03T18:58:41,716 DEBUG [M:0;db5a5ccf5be8:34991 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d5c94a7aad474c72a54f5e8ef7ef3591 is 82, key is hbase:meta,,1/info:regioninfo/1733252254141/Put/seqid=0 2024-12-03T18:58:41,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741881_1057 (size=5672) 2024-12-03T18:58:41,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741881_1057 (size=5672) 2024-12-03T18:58:41,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:58:41,769 INFO [RS:0;db5a5ccf5be8:38091 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:58:41,769 INFO [RS:0;db5a5ccf5be8:38091 {}] regionserver.HRegionServer(1031): Exiting; stopping=db5a5ccf5be8,38091,1733252253164; zookeeper connection closed. 
2024-12-03T18:58:41,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38091-0x1019c8e80dc0001, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:58:41,770 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@174be924 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@174be924 2024-12-03T18:58:41,770 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T18:58:42,121 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d5c94a7aad474c72a54f5e8ef7ef3591 2024-12-03T18:58:42,143 DEBUG [M:0;db5a5ccf5be8:34991 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/59259c8d110b4cc78863586c8e39f5cf is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733252254669/Put/seqid=0 2024-12-03T18:58:42,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741882_1058 (size=7089) 2024-12-03T18:58:42,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741882_1058 (size=7089) 2024-12-03T18:58:42,157 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.80 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/59259c8d110b4cc78863586c8e39f5cf 2024-12-03T18:58:42,161 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 59259c8d110b4cc78863586c8e39f5cf 2024-12-03T18:58:42,181 DEBUG [M:0;db5a5ccf5be8:34991 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/35529e097a4943c0833a48faf47b57a8 is 69, key is db5a5ccf5be8,38091,1733252253164/rs:state/1733252253629/Put/seqid=0 2024-12-03T18:58:42,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741883_1059 (size=5156) 2024-12-03T18:58:42,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741883_1059 (size=5156) 2024-12-03T18:58:42,186 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/35529e097a4943c0833a48faf47b57a8 2024-12-03T18:58:42,209 DEBUG [M:0;db5a5ccf5be8:34991 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d803686da43436ca0185435de3272cf is 52, key is load_balancer_on/state:d/1733252254294/Put/seqid=0 2024-12-03T18:58:42,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741884_1060 (size=5056) 2024-12-03T18:58:42,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741884_1060 (size=5056) 2024-12-03T18:58:42,219 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d803686da43436ca0185435de3272cf 2024-12-03T18:58:42,224 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d5c94a7aad474c72a54f5e8ef7ef3591 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d5c94a7aad474c72a54f5e8ef7ef3591 2024-12-03T18:58:42,229 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d5c94a7aad474c72a54f5e8ef7ef3591, entries=8, sequenceid=125, filesize=5.5 K 2024-12-03T18:58:42,230 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/59259c8d110b4cc78863586c8e39f5cf as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/59259c8d110b4cc78863586c8e39f5cf 2024-12-03T18:58:42,237 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 59259c8d110b4cc78863586c8e39f5cf 2024-12-03T18:58:42,237 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/59259c8d110b4cc78863586c8e39f5cf, entries=13, sequenceid=125, filesize=6.9 K 2024-12-03T18:58:42,238 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/35529e097a4943c0833a48faf47b57a8 as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/35529e097a4943c0833a48faf47b57a8 2024-12-03T18:58:42,243 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/35529e097a4943c0833a48faf47b57a8, entries=1, 
sequenceid=125, filesize=5.0 K 2024-12-03T18:58:42,246 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d803686da43436ca0185435de3272cf as hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5d803686da43436ca0185435de3272cf 2024-12-03T18:58:42,251 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42457/user/jenkins/test-data/531c400c-292b-50dd-0da5-b2f6c48efda3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5d803686da43436ca0185435de3272cf, entries=1, sequenceid=125, filesize=4.9 K 2024-12-03T18:58:42,252 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 552ms, sequenceid=125, compaction requested=false 2024-12-03T18:58:42,254 INFO [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:42,254 DEBUG [M:0;db5a5ccf5be8:34991 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252321698Disabling compacts and flushes for region at 1733252321698Disabling writes for close at 1733252321699 (+1 ms)Obtaining lock to block concurrent updates at 1733252321699Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733252321699Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52639, getHeapSize=64784, getOffHeapSize=0, getCellsCount=148 at 1733252321699Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733252321700 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733252321700Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733252321715 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733252321715Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733252322127 (+412 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733252322142 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733252322142Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733252322161 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733252322180 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733252322180Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733252322190 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733252322209 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733252322209Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dd9690f: reopening flushed file at 1733252322223 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48456f7: reopening flushed file at 1733252322230 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@314b159f: reopening flushed file at 1733252322237 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b631135: reopening flushed file at 1733252322243 (+6 ms)Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 552ms, sequenceid=125, compaction requested=false at 1733252322252 (+9 ms)Writing region close event to WAL at 1733252322253 (+1 ms)Closed at 1733252322253 2024-12-03T18:58:42,255 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:42,255 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:42,255 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:42,255 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:42,255 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:42,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33431 is added to blk_1073741830_1006 (size=61308) 2024-12-03T18:58:42,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44849 is added to blk_1073741830_1006 (size=61308) 2024-12-03T18:58:42,258 INFO [M:0;db5a5ccf5be8:34991 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T18:58:42,258 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T18:58:42,258 INFO [M:0;db5a5ccf5be8:34991 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34991 2024-12-03T18:58:42,258 INFO [M:0;db5a5ccf5be8:34991 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:58:42,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:58:42,369 INFO [M:0;db5a5ccf5be8:34991 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:58:42,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34991-0x1019c8e80dc0000, quorum=127.0.0.1:56149, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:58:42,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ce0a24{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:58:42,405 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47bcda8c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:58:42,405 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:58:42,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a5db76d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:58:42,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4437c7ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.log.dir/,STOPPED} 2024-12-03T18:58:42,407 WARN [BP-1603513973-172.17.0.2-1733252250452 heartbeating to localhost/127.0.0.1:42457 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:58:42,407 WARN [BP-1603513973-172.17.0.2-1733252250452 heartbeating to localhost/127.0.0.1:42457 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1603513973-172.17.0.2-1733252250452 (Datanode Uuid 0d2a7dac-d708-4f9e-b6e1-0fcb96f23176) service to localhost/127.0.0.1:42457 2024-12-03T18:58:42,407 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:58:42,407 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:58:42,408 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/data/data3/current/BP-1603513973-172.17.0.2-1733252250452 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:58:42,408 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/data/data4/current/BP-1603513973-172.17.0.2-1733252250452 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:58:42,408 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:58:42,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1204fb24{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:58:42,411 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23e1642c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:58:42,411 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:58:42,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5551c062{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:58:42,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41b7d19a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.log.dir/,STOPPED} 2024-12-03T18:58:42,412 WARN [BP-1603513973-172.17.0.2-1733252250452 heartbeating to localhost/127.0.0.1:42457 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T18:58:42,412 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T18:58:42,412 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T18:58:42,412 WARN [BP-1603513973-172.17.0.2-1733252250452 heartbeating to localhost/127.0.0.1:42457 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1603513973-172.17.0.2-1733252250452 (Datanode Uuid acc0d782-1206-49a3-a9c7-501e90658194) service to localhost/127.0.0.1:42457 2024-12-03T18:58:42,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/data/data1/current/BP-1603513973-172.17.0.2-1733252250452 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:58:42,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/cluster_f08de17b-ace7-a366-8397-2ec1db47c248/data/data2/current/BP-1603513973-172.17.0.2-1733252250452 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T18:58:42,413 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T18:58:42,418 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43909889{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:58:42,418 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ac7d52f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T18:58:42,418 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T18:58:42,418 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4881a2ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T18:58:42,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45bda0cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.log.dir/,STOPPED} 2024-12-03T18:58:42,425 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T18:58:42,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T18:58:42,460 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=227 (was 207) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42457 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42457 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42457 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:42457 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42457 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42457 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42457 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42457 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 486) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 142) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5654 (was 6226) 2024-12-03T18:58:42,466 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=227, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=5654 2024-12-03T18:58:42,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T18:58:42,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.log.dir so I do NOT create it in target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8 2024-12-03T18:58:42,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3bac46c1-7bdc-fefe-b57d-f71e0ef783ba/hadoop.tmp.dir so I do NOT create it in target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8 2024-12-03T18:58:42,466 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d, deleteOnExit=true 2024-12-03T18:58:42,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T18:58:42,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/test.cache.data in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/hadoop.log.dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T18:58:42,467 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/nfs.dump.dir in system properties and HBase conf 2024-12-03T18:58:42,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/java.io.tmpdir in system properties and HBase conf 2024-12-03T18:58:42,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T18:58:42,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T18:58:42,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T18:58:42,481 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:58:42,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:42,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:42,796 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:58:42,799 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:58:42,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:58:42,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:58:42,800 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:58:42,801 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:58:42,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@259cffcc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:58:42,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3910812a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:58:42,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a86d190{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/java.io.tmpdir/jetty-localhost-36525-hadoop-hdfs-3_4_1-tests_jar-_-any-6065474893936310131/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T18:58:42,900 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a47ab0e{HTTP/1.1, (http/1.1)}{localhost:36525} 2024-12-03T18:58:42,900 INFO [Time-limited test {}] server.Server(415): Started @320777ms 2024-12-03T18:58:42,913 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T18:58:43,142 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:58:43,145 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:58:43,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:58:43,148 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:58:43,148 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:58:43,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23266789{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:58:43,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54008d53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:58:43,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@115f614b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/java.io.tmpdir/jetty-localhost-38821-hadoop-hdfs-3_4_1-tests_jar-_-any-6541292620820959751/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:58:43,254 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d45b9b7{HTTP/1.1, (http/1.1)}{localhost:38821} 2024-12-03T18:58:43,254 INFO [Time-limited test {}] server.Server(415): Started @321131ms 2024-12-03T18:58:43,255 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:58:43,282 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T18:58:43,285 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T18:58:43,286 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T18:58:43,286 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T18:58:43,286 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T18:58:43,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36507c53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/hadoop.log.dir/,AVAILABLE} 2024-12-03T18:58:43,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fa9d1ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T18:58:43,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2eb38aaf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/java.io.tmpdir/jetty-localhost-32979-hadoop-hdfs-3_4_1-tests_jar-_-any-7072915311008703435/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T18:58:43,403 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59532081{HTTP/1.1, (http/1.1)}{localhost:32979} 2024-12-03T18:58:43,403 INFO [Time-limited test {}] server.Server(415): Started @321280ms 2024-12-03T18:58:43,405 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T18:58:43,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:43,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:44,240 WARN [Thread-2509 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/data/data2/current/BP-272720403-172.17.0.2-1733252322483/current, will proceed with Du for space computation calculation, 2024-12-03T18:58:44,240 WARN [Thread-2508 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/data/data1/current/BP-272720403-172.17.0.2-1733252322483/current, will proceed with Du for space computation calculation, 2024-12-03T18:58:44,273 WARN [Thread-2472 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:58:44,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c2f6a3d174d729f with lease ID 0xcd0f8c33a7305001: Processing first storage report for DS-36b8577d-3051-49cf-bc10-eed4529cc261 from datanode DatanodeRegistration(127.0.0.1:45741, datanodeUuid=d282bb35-2459-4da2-90f9-5c35ea3d3a7e, infoPort=42867, infoSecurePort=0, ipcPort=39793, storageInfo=lv=-57;cid=testClusterID;nsid=385437104;c=1733252322483) 2024-12-03T18:58:44,275 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c2f6a3d174d729f with lease ID 0xcd0f8c33a7305001: from storage DS-36b8577d-3051-49cf-bc10-eed4529cc261 node DatanodeRegistration(127.0.0.1:45741, datanodeUuid=d282bb35-2459-4da2-90f9-5c35ea3d3a7e, infoPort=42867, infoSecurePort=0, ipcPort=39793, storageInfo=lv=-57;cid=testClusterID;nsid=385437104;c=1733252322483), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T18:58:44,275 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c2f6a3d174d729f with lease ID 0xcd0f8c33a7305001: Processing first storage report for DS-105f8974-8373-41e1-9970-0d6bf443be91 from datanode DatanodeRegistration(127.0.0.1:45741, datanodeUuid=d282bb35-2459-4da2-90f9-5c35ea3d3a7e, infoPort=42867, infoSecurePort=0, ipcPort=39793, storageInfo=lv=-57;cid=testClusterID;nsid=385437104;c=1733252322483) 2024-12-03T18:58:44,275 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c2f6a3d174d729f with lease ID 0xcd0f8c33a7305001: from storage DS-105f8974-8373-41e1-9970-0d6bf443be91 node DatanodeRegistration(127.0.0.1:45741, datanodeUuid=d282bb35-2459-4da2-90f9-5c35ea3d3a7e, infoPort=42867, infoSecurePort=0, ipcPort=39793, storageInfo=lv=-57;cid=testClusterID;nsid=385437104;c=1733252322483), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:58:44,367 WARN [Thread-2519 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/data/data3/current/BP-272720403-172.17.0.2-1733252322483/current, will proceed with Du for space computation calculation, 2024-12-03T18:58:44,367 WARN [Thread-2520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/data/data4/current/BP-272720403-172.17.0.2-1733252322483/current, will proceed with Du for space computation calculation, 2024-12-03T18:58:44,393 WARN [Thread-2495 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T18:58:44,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d9845c18f9d1f5f with lease ID 0xcd0f8c33a7305002: Processing first storage report for DS-56c40de7-4515-4a9d-a905-bcc38a2b6c24 from datanode DatanodeRegistration(127.0.0.1:39097, datanodeUuid=9e401a9f-c336-4957-ab88-2ba80342f70d, infoPort=44203, infoSecurePort=0, ipcPort=33733, storageInfo=lv=-57;cid=testClusterID;nsid=385437104;c=1733252322483) 2024-12-03T18:58:44,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d9845c18f9d1f5f with lease ID 0xcd0f8c33a7305002: from storage DS-56c40de7-4515-4a9d-a905-bcc38a2b6c24 node DatanodeRegistration(127.0.0.1:39097, datanodeUuid=9e401a9f-c336-4957-ab88-2ba80342f70d, infoPort=44203, infoSecurePort=0, ipcPort=33733, storageInfo=lv=-57;cid=testClusterID;nsid=385437104;c=1733252322483), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:58:44,394 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d9845c18f9d1f5f with lease ID 0xcd0f8c33a7305002: Processing first storage report for DS-437478df-0f4b-4f04-81c3-6c02795bf057 from datanode DatanodeRegistration(127.0.0.1:39097, datanodeUuid=9e401a9f-c336-4957-ab88-2ba80342f70d, infoPort=44203, infoSecurePort=0, ipcPort=33733, storageInfo=lv=-57;cid=testClusterID;nsid=385437104;c=1733252322483) 2024-12-03T18:58:44,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d9845c18f9d1f5f with lease ID 0xcd0f8c33a7305002: from storage DS-437478df-0f4b-4f04-81c3-6c02795bf057 node DatanodeRegistration(127.0.0.1:39097, datanodeUuid=9e401a9f-c336-4957-ab88-2ba80342f70d, infoPort=44203, infoSecurePort=0, ipcPort=33733, storageInfo=lv=-57;cid=testClusterID;nsid=385437104;c=1733252322483), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T18:58:44,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8 2024-12-03T18:58:44,443 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/zookeeper_0, clientPort=56511, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T18:58:44,444 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56511 2024-12-03T18:58:44,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:58:44,445 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:58:44,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:58:44,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741825_1001 (size=7) 2024-12-03T18:58:44,453 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768 with version=8 2024-12-03T18:58:44,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42673/user/jenkins/test-data/6807bb60-f78c-0ede-7870-b94fe8933208/hbase-staging 2024-12-03T18:58:44,455 INFO [Time-limited test {}] client.ConnectionUtils(128): master/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:58:44,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:58:44,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:58:44,455 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:58:44,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:58:44,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:58:44,455 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T18:58:44,455 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:58:44,456 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38913 2024-12-03T18:58:44,457 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38913 connecting to ZooKeeper ensemble=127.0.0.1:56511 2024-12-03T18:58:44,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:44,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389130x0, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:58:44,505 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38913-0x1019c8f98060000 connected 2024-12-03T18:58:44,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:44,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:58:44,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:58:44,589 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:58:44,590 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768, hbase.cluster.distributed=false 2024-12-03T18:58:44,591 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:58:44,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38913 2024-12-03T18:58:44,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38913 2024-12-03T18:58:44,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38913 2024-12-03T18:58:44,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38913 2024-12-03T18:58:44,593 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38913 2024-12-03T18:58:44,610 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/db5a5ccf5be8:0 server-side Connection retries=45 2024-12-03T18:58:44,610 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:58:44,610 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T18:58:44,611 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T18:58:44,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T18:58:44,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T18:58:44,611 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T18:58:44,611 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T18:58:44,611 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37685 2024-12-03T18:58:44,612 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37685 connecting to ZooKeeper ensemble=127.0.0.1:56511 2024-12-03T18:58:44,613 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:58:44,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:58:44,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376850x0, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T18:58:44,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:376850x0, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:58:44,683 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37685-0x1019c8f98060001 connected 2024-12-03T18:58:44,683 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T18:58:44,684 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T18:58:44,685 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T18:58:44,686 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T18:58:44,687 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37685 2024-12-03T18:58:44,687 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37685 2024-12-03T18:58:44,687 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37685 2024-12-03T18:58:44,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37685 2024-12-03T18:58:44,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37685 2024-12-03T18:58:44,701 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db5a5ccf5be8:38913 2024-12-03T18:58:44,702 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:44,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:58:44,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:58:44,750 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:44,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T18:58:44,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:44,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:44,859 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T18:58:44,860 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db5a5ccf5be8,38913,1733252324455 from backup master directory 2024-12-03T18:58:44,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-03T18:58:44,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:44,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T18:58:44,975 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:58:44,975 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:44,979 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/hbase.id] with ID: b76ef66a-a9e8-44ec-a806-da4a85e39409 2024-12-03T18:58:44,979 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/.tmp/hbase.id 2024-12-03T18:58:44,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:58:44,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741826_1002 (size=42) 2024-12-03T18:58:44,987 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/.tmp/hbase.id]:[hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/hbase.id] 2024-12-03T18:58:44,998 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:58:44,998 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T18:58:45,000 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-03T18:58:45,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:58:45,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741827_1003 (size=196) 2024-12-03T18:58:45,018 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T18:58:45,019 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T18:58:45,019 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:58:45,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:58:45,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741828_1004 (size=1189) 2024-12-03T18:58:45,026 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store 2024-12-03T18:58:45,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:58:45,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741829_1005 (size=34) 2024-12-03T18:58:45,032 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:58:45,033 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:58:45,033 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:45,033 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:45,033 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:58:45,033 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:45,033 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T18:58:45,033 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252325032Disabling compacts and flushes for region at 1733252325032Disabling writes for close at 1733252325033 (+1 ms)Writing region close event to WAL at 1733252325033Closed at 1733252325033 2024-12-03T18:58:45,034 WARN [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/.initializing 2024-12-03T18:58:45,034 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/WALs/db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:45,036 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C38913%2C1733252324455, suffix=, logDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/WALs/db5a5ccf5be8,38913,1733252324455, archiveDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/oldWALs, maxLogs=10 2024-12-03T18:58:45,036 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C38913%2C1733252324455.1733252325036 2024-12-03T18:58:45,042 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/WALs/db5a5ccf5be8,38913,1733252324455/db5a5ccf5be8%2C38913%2C1733252324455.1733252325036 2024-12-03T18:58:45,043 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44203:44203),(127.0.0.1/127.0.0.1:42867:42867)] 2024-12-03T18:58:45,043 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T18:58:45,044 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:58:45,044 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,044 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T18:58:45,058 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:45,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T18:58:45,060 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:58:45,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T18:58:45,061 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:58:45,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T18:58:45,063 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T18:58:45,064 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,064 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,065 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,066 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,066 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,066 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T18:58:45,067 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T18:58:45,069 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:58:45,069 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802622, jitterRate=0.020587250590324402}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T18:58:45,070 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733252325044Initializing all the Stores at 1733252325045 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252325045Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252325056 (+11 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252325056Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252325056Cleaning up temporary data from old regions at 1733252325066 (+10 ms)Region opened successfully at 1733252325070 (+4 ms) 2024-12-03T18:58:45,070 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T18:58:45,072 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56348cef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:58:45,073 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T18:58:45,073 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T18:58:45,073 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T18:58:45,073 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T18:58:45,074 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T18:58:45,074 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T18:58:45,074 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T18:58:45,076 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T18:58:45,077 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T18:58:45,085 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T18:58:45,086 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T18:58:45,086 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T18:58:45,096 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T18:58:45,096 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T18:58:45,097 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T18:58:45,106 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T18:58:45,107 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T18:58:45,117 DEBUG 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T18:58:45,120 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T18:58:45,132 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T18:58:45,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:58:45,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T18:58:45,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,143 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=db5a5ccf5be8,38913,1733252324455, sessionid=0x1019c8f98060000, setting cluster-up flag (Was=false) 2024-12-03T18:58:45,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,290 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T18:58:45,291 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:45,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,343 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T18:58:45,344 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:45,345 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T18:58:45,347 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T18:58:45,347 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T18:58:45,347 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T18:58:45,347 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db5a5ccf5be8,38913,1733252324455 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T18:58:45,349 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:58:45,349 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:58:45,349 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:58:45,349 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=5, maxPoolSize=5 2024-12-03T18:58:45,349 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db5a5ccf5be8:0, corePoolSize=10, maxPoolSize=10 2024-12-03T18:58:45,349 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,349 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:58:45,349 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db5a5ccf5be8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T18:58:45,351 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733252355351 2024-12-03T18:58:45,351 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T18:58:45,351 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T18:58:45,351 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T18:58:45,351 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T18:58:45,351 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T18:58:45,351 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T18:58:45,352 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,352 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:58:45,352 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T18:58:45,352 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T18:58:45,352 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T18:58:45,352 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T18:58:45,353 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T18:58:45,353 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T18:58:45,353 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252325353,5,FailOnTimeoutGroup] 2024-12-03T18:58:45,353 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,353 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252325353,5,FailOnTimeoutGroup] 2024-12-03T18:58:45,353 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-03T18:58:45,353 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T18:58:45,353 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,353 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,353 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T18:58:45,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:58:45,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741831_1007 (size=1321) 2024-12-03T18:58:45,359 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T18:58:45,360 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768 2024-12-03T18:58:45,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:58:45,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741832_1008 (size=32) 2024-12-03T18:58:45,366 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:58:45,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:58:45,368 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:58:45,368 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:45,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-12-03T18:58:45,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:58:45,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:45,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:58:45,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:58:45,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:45,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:58:45,373 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:58:45,373 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:45,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:45,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:58:45,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740 2024-12-03T18:58:45,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740 2024-12-03T18:58:45,375 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:58:45,375 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:58:45,376 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
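The repeated CompactionConfiguration lines are the region server echoing its effective compaction settings for each column family of hbase:meta: 3 to 10 files per compaction, selection ratio 1.2, off-peak ratio 5.0, a 7-day major-compaction period with 0.5 jitter. For orientation, a minimal sketch of the standard hbase-site.xml keys behind those numbers, set programmatically (values mirror what the log printed; key names assumed from current HBase defaults):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    conf.setInt("hbase.hstore.compaction.min", 3);            // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);           // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);     // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);   // major period, 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter

    System.out.println("compaction ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
  }
}
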
2024-12-03T18:58:45,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:58:45,378 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T18:58:45,379 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707295, jitterRate=-0.1006283313035965}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:58:45,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733252325366Initializing all the Stores at 1733252325366Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252325366Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252325367 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252325367Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252325367Cleaning up temporary data from old regions at 1733252325375 (+8 ms)Region opened successfully at 1733252325379 (+4 ms) 2024-12-03T18:58:45,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:58:45,379 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:58:45,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:58:45,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:58:45,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:58:45,380 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:58:45,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252325379Disabling compacts and flushes for region at 1733252325379Disabling writes for close at 1733252325379Writing region close 
event to WAL at 1733252325380 (+1 ms)Closed at 1733252325380 2024-12-03T18:58:45,380 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:58:45,381 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T18:58:45,381 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T18:58:45,382 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:58:45,382 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T18:58:45,390 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(746): ClusterId : b76ef66a-a9e8-44ec-a806-da4a85e39409 2024-12-03T18:58:45,391 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T18:58:45,402 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T18:58:45,402 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T18:58:45,413 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T18:58:45,413 DEBUG [RS:0;db5a5ccf5be8:37685 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@586bad08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db5a5ccf5be8/172.17.0.2:0 2024-12-03T18:58:45,428 DEBUG [RS:0;db5a5ccf5be8:37685 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db5a5ccf5be8:37685 2024-12-03T18:58:45,428 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T18:58:45,428 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T18:58:45,428 DEBUG [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(832): About to register with Master. 
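The AbstractRpcClient line for RS:0 lists the parameters the region server will use on its RPC connection to the master: KeyValueCodec with no compressor, TCP keep-alive and no-delay, and the connect/read/write timeouts. A hedged sketch of the client-side keys that typically feed those values; the key names are an assumption based on common HBase configuration rather than anything shown in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.codec.KeyValueCodec;

public class RpcClientCodecConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Cell codec used on the RPC connection; the log shows KeyValueCodec
    // with compressor=null.
    conf.set("hbase.client.rpc.codec", KeyValueCodec.class.getCanonicalName());

    // TCP options echoed by AbstractRpcClient above.
    conf.setBoolean("hbase.ipc.client.tcpnodelay", true);
    conf.setBoolean("hbase.ipc.client.tcpkeepalive", true);

    System.out.println("codec = " + conf.get("hbase.client.rpc.codec"));
  }
}
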
2024-12-03T18:58:45,428 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(2659): reportForDuty to master=db5a5ccf5be8,38913,1733252324455 with port=37685, startcode=1733252324610 2024-12-03T18:58:45,428 DEBUG [RS:0;db5a5ccf5be8:37685 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T18:58:45,431 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46027, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T18:58:45,432 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38913 {}] master.ServerManager(363): Checking decommissioned status of RegionServer db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:45,432 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38913 {}] master.ServerManager(517): Registering regionserver=db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:45,433 DEBUG [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768 2024-12-03T18:58:45,433 DEBUG [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42763 2024-12-03T18:58:45,433 DEBUG [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T18:58:45,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:58:45,444 DEBUG [RS:0;db5a5ccf5be8:37685 {}] zookeeper.ZKUtil(111): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:45,444 WARN [RS:0;db5a5ccf5be8:37685 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T18:58:45,444 INFO [RS:0;db5a5ccf5be8:37685 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:58:45,444 DEBUG [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:45,444 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db5a5ccf5be8,37685,1733252324610] 2024-12-03T18:58:45,447 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T18:58:45,449 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T18:58:45,449 INFO [RS:0;db5a5ccf5be8:37685 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T18:58:45,449 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
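MemStoreFlusher's globalMemStoreLimit=880 M and low mark of 836 M are derived from heap-fraction settings rather than absolute byte counts. A small sketch of the usual keys involved; the values shown are the stock defaults and are assumed, not read from this cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Fraction of the region server heap that all memstores may use before
    // flushes are forced (the 880 M figure is this fraction of the test heap).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);

    // Low-water mark relative to the limit above; forced flushing stops once
    // usage drops back below it (836 M in the log).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

    System.out.println("global memstore fraction = "
        + conf.getFloat("hbase.regionserver.global.memstore.size", 0f));
  }
}
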
2024-12-03T18:58:45,450 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T18:58:45,450 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T18:58:45,450 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=2, maxPoolSize=2 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db5a5ccf5be8:0, corePoolSize=1, maxPoolSize=1 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:58:45,451 DEBUG [RS:0;db5a5ccf5be8:37685 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db5a5ccf5be8:0, corePoolSize=3, maxPoolSize=3 2024-12-03T18:58:45,452 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T18:58:45,452 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,452 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,452 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,452 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,452 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37685,1733252324610-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:58:45,468 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T18:58:45,468 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,37685,1733252324610-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,468 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,468 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.Replication(171): db5a5ccf5be8,37685,1733252324610 started 2024-12-03T18:58:45,480 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:45,480 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1482): Serving as db5a5ccf5be8,37685,1733252324610, RpcServer on db5a5ccf5be8/172.17.0.2:37685, sessionid=0x1019c8f98060001 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,37685,1733252324610' 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db5a5ccf5be8,37685,1733252324610' 2024-12-03T18:58:45,481 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T18:58:45,482 DEBUG 
[RS:0;db5a5ccf5be8:37685 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T18:58:45,482 DEBUG [RS:0;db5a5ccf5be8:37685 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T18:58:45,482 INFO [RS:0;db5a5ccf5be8:37685 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T18:58:45,482 INFO [RS:0;db5a5ccf5be8:37685 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T18:58:45,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:45,533 WARN [db5a5ccf5be8:38913 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T18:58:45,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T18:58:45,584 INFO [RS:0;db5a5ccf5be8:37685 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C37685%2C1733252324610, suffix=, logDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/db5a5ccf5be8,37685,1733252324610, archiveDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/oldWALs, maxLogs=32 2024-12-03T18:58:45,584 INFO [RS:0;db5a5ccf5be8:37685 {}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37685%2C1733252324610.1733252325584 2024-12-03T18:58:45,590 INFO [RS:0;db5a5ccf5be8:37685 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/db5a5ccf5be8,37685,1733252324610/db5a5ccf5be8%2C37685%2C1733252324610.1733252325584 2024-12-03T18:58:45,591 DEBUG [RS:0;db5a5ccf5be8:37685 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42867:42867),(127.0.0.1/127.0.0.1:44203:44203)] 2024-12-03T18:58:45,783 DEBUG [db5a5ccf5be8:38913 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T18:58:45,783 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:45,784 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,37685,1733252324610, state=OPENING 2024-12-03T18:58:45,864 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T18:58:45,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:45,875 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T18:58:45,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:58:45,875 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:58:45,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,37685,1733252324610}] 2024-12-03T18:58:46,029 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T18:58:46,031 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38429, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T18:58:46,035 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T18:58:46,035 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:58:46,038 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db5a5ccf5be8%2C37685%2C1733252324610.meta, suffix=.meta, logDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/db5a5ccf5be8,37685,1733252324610, archiveDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/oldWALs, maxLogs=32 2024-12-03T18:58:46,038 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor db5a5ccf5be8%2C37685%2C1733252324610.meta.1733252326038.meta 2024-12-03T18:58:46,048 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/db5a5ccf5be8,37685,1733252324610/db5a5ccf5be8%2C37685%2C1733252324610.meta.1733252326038.meta 2024-12-03T18:58:46,052 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44203:44203),(127.0.0.1/127.0.0.1:42867:42867)] 2024-12-03T18:58:46,052 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 
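The AbstractFSWAL configuration lines (blocksize=256 MB, rollsize=128 MB, maxLogs=32) and the FSHLogProvider instantiation map onto a handful of WAL settings. A hedged sketch with the key names I would expect, assumed from common HBase usage rather than taken from the test; confirm them against your version's defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // "filesystem" selects FSHLogProvider, the provider instantiated above.
    conf.set("hbase.wal.provider", "filesystem");

    // rollsize (128 MB) = blocksize (256 MB) * roll multiplier (0.5).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);

    // Upper bound on un-archived WAL files before flushes are forced (maxLogs=32).
    conf.setInt("hbase.regionserver.maxlogs", 32);
  }
}
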
2024-12-03T18:58:46,053 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T18:58:46,053 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T18:58:46,053 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T18:58:46,053 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T18:58:46,053 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T18:58:46,053 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T18:58:46,053 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T18:58:46,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T18:58:46,055 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T18:58:46,055 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:46,056 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:46,056 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T18:58:46,057 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T18:58:46,057 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:46,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:46,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T18:58:46,058 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T18:58:46,058 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:46,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:46,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T18:58:46,059 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T18:58:46,059 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T18:58:46,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T18:58:46,059 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T18:58:46,060 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740 2024-12-03T18:58:46,061 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740 2024-12-03T18:58:46,062 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T18:58:46,062 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T18:58:46,062 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
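When the region server opens hbase:meta it re-reads the descriptor the master wrote earlier: four column families with ROW_INDEX_V1 encoding, ROWCOL bloom filters, in-memory caching, and the MultiRowMutationEndpoint coprocessor. For orientation only, a sketch of how an equivalent descriptor for an ordinary table could be expressed with the public client API; hbase:meta itself is created internally by InitMetaProcedure, and the "demo" table name below is a placeholder:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) throws IOException {
    // A column family mirroring the settings printed for 'info':
    // 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();

    System.out.println(td);
  }
}
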
2024-12-03T18:58:46,063 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T18:58:46,064 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808026, jitterRate=0.027458608150482178}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T18:58:46,064 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T18:58:46,064 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733252326053Writing region info on filesystem at 1733252326053Initializing all the Stores at 1733252326054 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252326054Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252326054Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733252326054Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733252326054Cleaning up temporary data from old regions at 1733252326062 (+8 ms)Running coprocessor post-open hooks at 1733252326064 (+2 ms)Region opened successfully at 1733252326064 2024-12-03T18:58:46,065 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733252326029 2024-12-03T18:58:46,067 DEBUG [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T18:58:46,067 INFO [RS_OPEN_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T18:58:46,068 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:46,068 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db5a5ccf5be8,37685,1733252324610, state=OPEN 2024-12-03T18:58:46,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:58:46,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T18:58:46,105 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:46,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:58:46,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T18:58:46,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T18:58:46,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=db5a5ccf5be8,37685,1733252324610 in 230 msec 2024-12-03T18:58:46,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T18:58:46,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 726 msec 2024-12-03T18:58:46,111 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T18:58:46,111 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T18:58:46,112 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:58:46,112 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,37685,1733252324610, seqNum=-1] 2024-12-03T18:58:46,112 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:58:46,114 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42547, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:58:46,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 771 msec 2024-12-03T18:58:46,119 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733252326119, completionTime=-1 2024-12-03T18:58:46,119 INFO 
[master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T18:58:46,119 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T18:58:46,120 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T18:58:46,121 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733252386121 2024-12-03T18:58:46,121 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733252446121 2024-12-03T18:58:46,121 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-03T18:58:46,121 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38913,1733252324455-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:46,121 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38913,1733252324455-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:46,121 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38913,1733252324455-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:46,121 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db5a5ccf5be8:38913, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:46,121 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:46,122 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:46,123 DEBUG [master/db5a5ccf5be8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T18:58:46,125 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.150sec 2024-12-03T18:58:46,125 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T18:58:46,125 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T18:58:46,125 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T18:58:46,125 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
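With pid=1 finished and the master reporting initialization complete, a client can connect and confirm the state the surrounding log lines describe: one active master, one live region server, balancer switched off. A minimal, hedged sketch using the standard client API; it assumes an hbase-site.xml on the classpath pointing at the running cluster and is not taken from the test code itself:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterCheck {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {

      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master = " + metrics.getMasterName());
      System.out.println("live servers  = " + metrics.getLiveServerMetrics().size());

      // The "set balanceSwitch=false" request seen a little later in the log
      // is the server side of a client call like this one.
      boolean previous = admin.balancerSwitch(false, false);
      System.out.println("balancer was  = " + previous);
    }
  }
}
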
2024-12-03T18:58:46,125 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T18:58:46,125 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38913,1733252324455-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T18:58:46,125 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38913,1733252324455-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T18:58:46,127 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T18:58:46,128 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T18:58:46,128 INFO [master/db5a5ccf5be8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db5a5ccf5be8,38913,1733252324455-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T18:58:46,191 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13830cb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:58:46,191 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request db5a5ccf5be8,38913,-1 for getting cluster id 2024-12-03T18:58:46,191 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T18:58:46,193 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b76ef66a-a9e8-44ec-a806-da4a85e39409' 2024-12-03T18:58:46,193 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T18:58:46,193 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b76ef66a-a9e8-44ec-a806-da4a85e39409" 2024-12-03T18:58:46,193 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e7671da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:58:46,193 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [db5a5ccf5be8,38913,-1] 2024-12-03T18:58:46,194 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T18:58:46,194 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:46,195 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45110, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T18:58:46,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ab592d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T18:58:46,196 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T18:58:46,197 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=db5a5ccf5be8,37685,1733252324610, seqNum=-1] 2024-12-03T18:58:46,198 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T18:58:46,199 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60520, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T18:58:46,204 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:46,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T18:58:46,207 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T18:58:46,207 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T18:58:46,209 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/oldWALs, maxLogs=32 2024-12-03T18:58:46,209 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733252326209 2024-12-03T18:58:46,213 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/test.com,8080,1/test.com%2C8080%2C1.1733252326209 2024-12-03T18:58:46,214 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42867:42867),(127.0.0.1/127.0.0.1:44203:44203)] 2024-12-03T18:58:46,215 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733252326215 2024-12-03T18:58:46,219 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,219 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,219 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,219 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,220 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/test.com,8080,1/test.com%2C8080%2C1.1733252326209 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/test.com,8080,1/test.com%2C8080%2C1.1733252326215 2024-12-03T18:58:46,220 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44203:44203),(127.0.0.1/127.0.0.1:42867:42867)] 2024-12-03T18:58:46,220 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/test.com,8080,1/test.com%2C8080%2C1.1733252326209 is not closed yet, will try archiving it next time 2024-12-03T18:58:46,221 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,221 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741835_1011 (size=93) 2024-12-03T18:58:46,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741835_1011 (size=93) 2024-12-03T18:58:46,221 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,221 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,221 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,222 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/WALs/test.com,8080,1/test.com%2C8080%2C1.1733252326209 to hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/oldWALs/test.com%2C8080%2C1.1733252326209 2024-12-03T18:58:46,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741836_1012 (size=93) 2024-12-03T18:58:46,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741836_1012 (size=93) 2024-12-03T18:58:46,226 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/oldWALs 2024-12-03T18:58:46,226 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733252326215) 2024-12-03T18:58:46,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T18:58:46,226 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
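The sequence above — WALFactory instantiating an FSHLogProvider, creating a WAL under WALs/test.com,8080,1, rolling it, archiving the previous file to oldWALs, and closing it — is the behaviour this log-rolling test exercises. Below is a minimal sketch of driving the same steps through the public WAL API; the WALFactory constructor and the getWAL/rollWriter signatures are assumptions based on recent HBase versions, not something confirmed by this output.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALFactory;

    public class WalRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // "test.com,8080,1" mirrors the WAL prefix visible in the log above.
        WALFactory factory = new WALFactory(conf, "test.com,8080,1");
        RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("t")).build();

        WAL wal = factory.getWAL(region);  // creates .../WALs/test.com,8080,1/<prefix>.<timestamp>
        wal.rollWriter();                  // once no edits reference the old file it is moved to oldWALs
        factory.close();                   // corresponds to the "Closed WAL: FSHLog ..." lines
      }
    }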
2024-12-03T18:58:46,226 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:58:46,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:46,226 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:46,226 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
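The call stack above is the ordinary JUnit 4 teardown path: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection (hence the "Connection has been closed by Time-limited test" message) and then shuts the master and region server down. A minimal sketch of that lifecycle pattern follows; it assumes HBaseTestingUtil exposes a no-arg constructor and startMiniCluster(), as its predecessor HBaseTestingUtility did — only shutdownMiniCluster() is directly confirmed by the stack trace.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        testUtil.startMiniCluster();  // "Minicluster is up; activeMaster=..." above
      }

      @After
      public void tearDown() throws Exception {
        // Closes the shared connection and stops master + region servers,
        // producing the shutdown sequence that follows in this log.
        testUtil.shutdownMiniCluster();
      }

      @Test
      public void doesNothing() {
        // test body elided; the logged run rolls WALs between setUp and tearDown
      }
    }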
2024-12-03T18:58:46,226 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T18:58:46,226 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1783320009, stopped=false 2024-12-03T18:58:46,226 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=db5a5ccf5be8,38913,1733252324455 2024-12-03T18:58:46,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:58:46,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T18:58:46,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:46,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:46,248 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:58:46,248 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T18:58:46,248 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:58:46,249 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:46,249 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:58:46,249 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T18:58:46,249 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'db5a5ccf5be8,37685,1733252324610' ***** 2024-12-03T18:58:46,249 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T18:58:46,249 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(959): stopping server db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;db5a5ccf5be8:37685. 
2024-12-03T18:58:46,249 DEBUG [RS:0;db5a5ccf5be8:37685 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T18:58:46,249 DEBUG [RS:0;db5a5ccf5be8:37685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T18:58:46,249 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T18:58:46,250 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T18:58:46,250 DEBUG [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T18:58:46,250 DEBUG [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T18:58:46,250 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T18:58:46,250 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T18:58:46,250 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T18:58:46,250 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T18:58:46,250 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T18:58:46,250 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-03T18:58:46,266 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740/.tmp/ns/2162e217e67a43048994c174f729fbd4 is 43, key is default/ns:d/1733252326114/Put/seqid=0 2024-12-03T18:58:46,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741837_1013 (size=5153) 2024-12-03T18:58:46,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741837_1013 (size=5153) 2024-12-03T18:58:46,272 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740/.tmp/ns/2162e217e67a43048994c174f729fbd4 2024-12-03T18:58:46,278 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740/.tmp/ns/2162e217e67a43048994c174f729fbd4 as hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740/ns/2162e217e67a43048994c174f729fbd4 2024-12-03T18:58:46,283 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740/ns/2162e217e67a43048994c174f729fbd4, entries=2, sequenceid=6, filesize=5.0 K 2024-12-03T18:58:46,283 INFO 
[RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 33ms, sequenceid=6, compaction requested=false 2024-12-03T18:58:46,283 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T18:58:46,287 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T18:58:46,288 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T18:58:46,288 INFO [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T18:58:46,288 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733252326250Running coprocessor pre-close hooks at 1733252326250Disabling compacts and flushes for region at 1733252326250Disabling writes for close at 1733252326250Obtaining lock to block concurrent updates at 1733252326250Preparing flush snapshotting stores in 1588230740 at 1733252326250Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733252326250Flushing stores of hbase:meta,,1.1588230740 at 1733252326251 (+1 ms)Flushing 1588230740/ns: creating writer at 1733252326251Flushing 1588230740/ns: appending metadata at 1733252326265 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733252326265Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@720d38bd: reopening flushed file at 1733252326277 (+12 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 33ms, sequenceid=6, compaction requested=false at 1733252326283 (+6 ms)Writing region close event to WAL at 1733252326284 (+1 ms)Running coprocessor post-close hooks at 1733252326288 (+4 ms)Closed at 1733252326288 2024-12-03T18:58:46,288 DEBUG [RS_CLOSE_META-regionserver/db5a5ccf5be8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T18:58:46,450 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(976): stopping server db5a5ccf5be8,37685,1733252324610; all regions closed. 2024-12-03T18:58:46,450 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,451 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,451 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,451 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,451 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,453 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741834_1010 (size=1152) 2024-12-03T18:58:46,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741834_1010 (size=1152) 2024-12-03T18:58:46,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,44293,1733252112865/db5a5ccf5be8%2C44293%2C1733252112865.1733252113091 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:46,515 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T18:58:46,515 INFO [regionserver/db5a5ccf5be8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T18:58:46,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37681/user/jenkins/test-data/df582f83-9e4d-cdd9-ca69-252f438fade6/WALs/db5a5ccf5be8,46359,1733252111199/db5a5ccf5be8%2C46359%2C1733252111199.meta.1733252112663.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T18:58:46,861 DEBUG [RS:0;db5a5ccf5be8:37685 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/oldWALs 2024-12-03T18:58:46,861 INFO [RS:0;db5a5ccf5be8:37685 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C37685%2C1733252324610.meta:.meta(num 1733252326038) 2024-12-03T18:58:46,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,861 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,861 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,861 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T18:58:46,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741833_1009 (size=93) 2024-12-03T18:58:46,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741833_1009 (size=93) 2024-12-03T18:58:46,864 DEBUG [RS:0;db5a5ccf5be8:37685 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/oldWALs 2024-12-03T18:58:46,864 INFO [RS:0;db5a5ccf5be8:37685 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog db5a5ccf5be8%2C37685%2C1733252324610:(num 1733252325584) 2024-12-03T18:58:46,864 DEBUG [RS:0;db5a5ccf5be8:37685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T18:58:46,865 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T18:58:46,865 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:58:46,865 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.ChoreService(370): Chore service for: regionserver/db5a5ccf5be8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T18:58:46,865 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:58:46,865 INFO [regionserver/db5a5ccf5be8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
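The two "Failed invocation" warnings above end in java.lang.reflect.InvocationTargetException: null even though the real failure is the nested IOException: Filesystem closed — RecoverLeaseFSUtils calls isFileClosed reflectively, and reflection wraps whatever the target method throws. The stand-alone illustration below shows that wrapping; the class and method are hypothetical stand-ins, not HBase code.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectionWrapSketch {
      // Stand-in for DistributedFileSystem#isFileClosed failing because the DFS client is already closed.
      public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
      }

      public static void main(String[] args) throws Exception {
        ReflectionWrapSketch fs = new ReflectionWrapSketch();
        Method m = ReflectionWrapSketch.class.getMethod("isFileClosed", String.class);
        try {
          m.invoke(fs, "/some/wal");
        } catch (InvocationTargetException e) {
          // InvocationTargetException carries no message of its own, so a logger prints "...: null";
          // the underlying cause holds the interesting text.
          System.out.println(e.getMessage());            // null
          System.out.println(e.getCause().getMessage()); // Filesystem closed
        }
      }
    }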
2024-12-03T18:58:46,865 INFO [RS:0;db5a5ccf5be8:37685 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37685 2024-12-03T18:58:46,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T18:58:46,917 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T18:58:46,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db5a5ccf5be8,37685,1733252324610 2024-12-03T18:58:46,932 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db5a5ccf5be8,37685,1733252324610] 2024-12-03T18:58:46,943 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/db5a5ccf5be8,37685,1733252324610 already deleted, retry=false 2024-12-03T18:58:46,943 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; db5a5ccf5be8,37685,1733252324610 expired; onlineServers=0 2024-12-03T18:58:46,943 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'db5a5ccf5be8,38913,1733252324455' ***** 2024-12-03T18:58:46,943 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T18:58:46,943 INFO [M:0;db5a5ccf5be8:38913 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T18:58:46,943 INFO [M:0;db5a5ccf5be8:38913 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T18:58:46,943 DEBUG [M:0;db5a5ccf5be8:38913 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T18:58:46,943 DEBUG [M:0;db5a5ccf5be8:38913 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T18:58:46,943 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
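The repeated "Cannot invoke \"java.util.Map.values()\" because \"this.executors\" is null" warnings during this shutdown are the JDK's helpful NullPointerException text (JEP 358, enabled by default since JDK 15) surfacing through the datanode's metric-collection catch block once its executor map has been released. The snippet below only illustrates where that wording comes from; the class and field are hypothetical stand-ins, not the Hadoop source.

    import java.util.Map;

    public class HelpfulNpeSketch {
      // Hypothetical field named like the one in the warning, left null after "shutdown".
      private Map<String, Object> executors;

      private void collectMetrics() {
        executors.values();  // field access through the implicit "this"
      }

      public static void main(String[] args) {
        try {
          new HelpfulNpeSketch().collectMetrics();
        } catch (NullPointerException e) {
          // On JDK 15+ this prints:
          //   Cannot invoke "java.util.Map.values()" because "this.executors" is null
          // which matches the text embedded in the WARN lines above.
          System.out.println(e.getMessage());
        }
      }
    }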
2024-12-03T18:58:46,943 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252325353 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.large.0-1733252325353,5,FailOnTimeoutGroup] 2024-12-03T18:58:46,943 DEBUG [master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252325353 {}] cleaner.HFileCleaner(306): Exit Thread[master/db5a5ccf5be8:0:becomeActiveMaster-HFileCleaner.small.0-1733252325353,5,FailOnTimeoutGroup] 2024-12-03T18:58:46,943 INFO [M:0;db5a5ccf5be8:38913 {}] hbase.ChoreService(370): Chore service for: master/db5a5ccf5be8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T18:58:46,943 INFO [M:0;db5a5ccf5be8:38913 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T18:58:46,943 DEBUG [M:0;db5a5ccf5be8:38913 {}] master.HMaster(1795): Stopping service threads 2024-12-03T18:58:46,943 INFO [M:0;db5a5ccf5be8:38913 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T18:58:46,943 INFO [M:0;db5a5ccf5be8:38913 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T18:58:46,943 INFO [M:0;db5a5ccf5be8:38913 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T18:58:46,944 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T18:58:46,953 DEBUG [M:0;db5a5ccf5be8:38913 {}] zookeeper.ZKUtil(347): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T18:58:46,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T18:58:46,953 WARN [M:0;db5a5ccf5be8:38913 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T18:58:46,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T18:58:46,954 INFO [M:0;db5a5ccf5be8:38913 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/.lastflushedseqids 2024-12-03T18:58:46,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741838_1014 (size=99) 2024-12-03T18:58:46,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741838_1014 (size=99) 2024-12-03T18:58:46,982 INFO [M:0;db5a5ccf5be8:38913 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T18:58:46,982 INFO [M:0;db5a5ccf5be8:38913 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T18:58:46,982 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T18:58:46,982 INFO [M:0;db5a5ccf5be8:38913 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:46,982 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:46,982 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T18:58:46,982 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T18:58:46,982 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-03T18:58:46,986 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T18:58:46,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,998 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:46,999 DEBUG [M:0;db5a5ccf5be8:38913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5fd29cfecc114551a184d3e51ba0ad62 is 82, key is hbase:meta,,1/info:regioninfo/1733252326067/Put/seqid=0 2024-12-03T18:58:47,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741839_1015 (size=5672) 2024-12-03T18:58:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741839_1015 (size=5672) 2024-12-03T18:58:47,021 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5fd29cfecc114551a184d3e51ba0ad62 2024-12-03T18:58:47,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:58:47,033 INFO [RS:0;db5a5ccf5be8:37685 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T18:58:47,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37685-0x1019c8f98060001, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T18:58:47,033 INFO [RS:0;db5a5ccf5be8:37685 {}] regionserver.HRegionServer(1031): Exiting; stopping=db5a5ccf5be8,37685,1733252324610; zookeeper connection closed. 2024-12-03T18:58:47,033 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4300142 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4300142 2024-12-03T18:58:47,033 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T18:58:47,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,036 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T18:58:47,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T18:58:47,043 DEBUG [M:0;db5a5ccf5be8:38913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99ae2b291f104f9cb2d18d8fa56e1261 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733252326118/Put/seqid=0
2024-12-03T18:58:47,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T18:58:47,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T18:58:47,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T18:58:47,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741840_1016 (size=5275)
2024-12-03T18:58:47,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741840_1016 (size=5275)
2024-12-03T18:58:47,048 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99ae2b291f104f9cb2d18d8fa56e1261
2024-12-03T18:58:47,068 DEBUG [M:0;db5a5ccf5be8:38913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6fb07792d0be450b8fd6a4295d20c7ee is 69, key is db5a5ccf5be8,37685,1733252324610/rs:state/1733252325432/Put/seqid=0
2024-12-03T18:58:47,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741841_1017 (size=5156)
2024-12-03T18:58:47,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741841_1017 (size=5156)
2024-12-03T18:58:47,074 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6fb07792d0be450b8fd6a4295d20c7ee
2024-12-03T18:58:47,095 DEBUG [M:0;db5a5ccf5be8:38913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/01966dcac9b7422c81f27c7a1eab85ca is 52, key is load_balancer_on/state:d/1733252326206/Put/seqid=0
2024-12-03T18:58:47,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741842_1018 (size=5056)
2024-12-03T18:58:47,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741842_1018 (size=5056)
2024-12-03T18:58:47,099 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/01966dcac9b7422c81f27c7a1eab85ca
2024-12-03T18:58:47,105 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5fd29cfecc114551a184d3e51ba0ad62 as hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5fd29cfecc114551a184d3e51ba0ad62
2024-12-03T18:58:47,111 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5fd29cfecc114551a184d3e51ba0ad62, entries=8, sequenceid=29, filesize=5.5 K
2024-12-03T18:58:47,112 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99ae2b291f104f9cb2d18d8fa56e1261 as hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/99ae2b291f104f9cb2d18d8fa56e1261
2024-12-03T18:58:47,116 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/99ae2b291f104f9cb2d18d8fa56e1261, entries=3, sequenceid=29, filesize=5.2 K
2024-12-03T18:58:47,117 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6fb07792d0be450b8fd6a4295d20c7ee as hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6fb07792d0be450b8fd6a4295d20c7ee
2024-12-03T18:58:47,121 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6fb07792d0be450b8fd6a4295d20c7ee, entries=1, sequenceid=29, filesize=5.0 K
2024-12-03T18:58:47,122 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/01966dcac9b7422c81f27c7a1eab85ca as hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/01966dcac9b7422c81f27c7a1eab85ca
2024-12-03T18:58:47,126 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42763/user/jenkins/test-data/aa1b2bc7-610d-7058-8e78-39c9d07d9768/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/01966dcac9b7422c81f27c7a1eab85ca, entries=1, sequenceid=29, filesize=4.9 K
2024-12-03T18:58:47,128 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false
2024-12-03T18:58:47,133 INFO [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-03T18:58:47,133 DEBUG [M:0;db5a5ccf5be8:38913 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733252326982Disabling compacts and flushes for region at 1733252326982Disabling writes for close at 1733252326982Obtaining lock to block concurrent updates at 1733252326982Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733252326982Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733252326983 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733252326984 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733252326984Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733252326999 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733252326999Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733252327025 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733252327042 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733252327042Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733252327052 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733252327067 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733252327068 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733252327079 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733252327094 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733252327094Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40e210da: reopening flushed file at 1733252327104 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52663262: reopening flushed file at 1733252327111 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55eaee19: reopening flushed file at 1733252327116 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@381e980f: reopening flushed file at 1733252327121 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false at 1733252327128 (+7 ms)Writing region close event to WAL at 1733252327133 (+5 ms)Closed at 1733252327133
2024-12-03T18:58:47,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:58:47,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:58:47,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:58:47,134 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:58:47,134 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T18:58:47,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45741 is added to blk_1073741830_1006 (size=10311)
2024-12-03T18:58:47,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39097 is added to blk_1073741830_1006 (size=10311)
2024-12-03T18:58:47,136 INFO [M:0;db5a5ccf5be8:38913 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-03T18:58:47,136 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-03T18:58:47,136 INFO [M:0;db5a5ccf5be8:38913 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38913
2024-12-03T18:58:47,136 INFO [M:0;db5a5ccf5be8:38913 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-03T18:58:47,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T18:58:47,308 INFO [M:0;db5a5ccf5be8:38913 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-03T18:58:47,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38913-0x1019c8f98060000, quorum=127.0.0.1:56511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T18:58:47,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2eb38aaf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T18:58:47,311 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59532081{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T18:58:47,311 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T18:58:47,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fa9d1ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T18:58:47,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36507c53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/hadoop.log.dir/,STOPPED}
2024-12-03T18:58:47,312 WARN [BP-272720403-172.17.0.2-1733252322483 heartbeating to localhost/127.0.0.1:42763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T18:58:47,312 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T18:58:47,312 WARN [BP-272720403-172.17.0.2-1733252322483 heartbeating to localhost/127.0.0.1:42763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-272720403-172.17.0.2-1733252322483 (Datanode Uuid 9e401a9f-c336-4957-ab88-2ba80342f70d) service to localhost/127.0.0.1:42763
2024-12-03T18:58:47,312 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T18:58:47,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/data/data3/current/BP-272720403-172.17.0.2-1733252322483 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T18:58:47,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/data/data4/current/BP-272720403-172.17.0.2-1733252322483 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T18:58:47,313 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T18:58:47,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@115f614b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T18:58:47,316 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d45b9b7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T18:58:47,316 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T18:58:47,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54008d53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T18:58:47,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23266789{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/hadoop.log.dir/,STOPPED}
2024-12-03T18:58:47,318 WARN [BP-272720403-172.17.0.2-1733252322483 heartbeating to localhost/127.0.0.1:42763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T18:58:47,318 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T18:58:47,318 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T18:58:47,318 WARN [BP-272720403-172.17.0.2-1733252322483 heartbeating to localhost/127.0.0.1:42763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-272720403-172.17.0.2-1733252322483 (Datanode Uuid d282bb35-2459-4da2-90f9-5c35ea3d3a7e) service to localhost/127.0.0.1:42763
2024-12-03T18:58:47,318 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/data/data1/current/BP-272720403-172.17.0.2-1733252322483 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T18:58:47,319 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/cluster_48c401af-08dc-bb2f-7152-f0bca1b62d1d/data/data2/current/BP-272720403-172.17.0.2-1733252322483 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T18:58:47,319 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T18:58:47,324 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a86d190{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T18:58:47,324 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a47ab0e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T18:58:47,324 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T18:58:47,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3910812a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T18:58:47,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@259cffcc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e8d1147c-5b65-f1cd-ceb8-11e11ff6bdc8/hadoop.log.dir/,STOPPED}
2024-12-03T18:58:47,330 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-03T18:58:47,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-03T18:58:47,355 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 227)
Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:42763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:42763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42763 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
- Thread LEAK? -, OpenFileDescriptor=534 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=253 (was 215) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5636 (was 5654)